2024-12-05 00:58:01,542 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 2024-12-05 00:58:01,570 main DEBUG Took 0.025396 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-05 00:58:01,570 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-05 00:58:01,571 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-05 00:58:01,572 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-05 00:58:01,575 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:58:01,594 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-05 00:58:01,630 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:58:01,632 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:58:01,632 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:58:01,633 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:58:01,633 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:58:01,634 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:58:01,635 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:58:01,635 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:58:01,636 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:58:01,636 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:58:01,637 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:58:01,637 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:58:01,638 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:58:01,638 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-05 00:58:01,639 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:58:01,639 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:58:01,639 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:58:01,640 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:58:01,640 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:58:01,648 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:58:01,648 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:58:01,653 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:58:01,653 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:58:01,654 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 00:58:01,654 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:58:01,655 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-05 00:58:01,657 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 00:58:01,658 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-05 00:58:01,661 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-05 00:58:01,661 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
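The status-logger output above shows Log4j Core assembling per-logger levels (root at INFO, org.apache.hadoop.hbase at DEBUG, org.apache.zookeeper at ERROR, org.apache.hadoop at WARN, and so on) from the log4j2.properties bundled in the test jar, which is named later in the log. Purely as an illustration of what those LoggerConfig builders amount to — not the project's actual configuration mechanism, and with the HBaseTestAppender seen below swapped for the stock Console appender — an equivalent setup sketched with Log4j2's programmatic ConfigurationBuilder API might look like this:

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.appender.ConsoleAppender;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.AppenderComponentBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

public final class TestLoggingSketch {
  public static void main(String[] args) {
    ConfigurationBuilder<BuiltConfiguration> builder =
        ConfigurationBuilderFactory.newConfigurationBuilder();

    // Console appender on stderr with the pattern reported in the log
    // (the real test run uses HBaseTestAppender rather than the stock Console appender).
    AppenderComponentBuilder console = builder
        .newAppender("Console", "Console")
        .addAttribute("target", ConsoleAppender.Target.SYSTEM_ERR)
        .add(builder.newLayout("PatternLayout")
            .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n"));
    builder.add(console);

    // A few of the per-logger levels that the LoggerConfig builders above report.
    builder.add(builder.newLogger("org.apache.hadoop", Level.WARN));
    builder.add(builder.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
    builder.add(builder.newLogger("org.apache.zookeeper", Level.ERROR));

    // Root logger at INFO, routed to the Console appender, matching "levelAndRefs=INFO,Console".
    builder.add(builder.newRootLogger(Level.INFO)
        .add(builder.newAppenderRef("Console")));

    Configurator.initialize(builder.build());
  }
}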
2024-12-05 00:58:01,663 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-05 00:58:01,663 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-05 00:58:01,675 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-05 00:58:01,678 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-05 00:58:01,680 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-05 00:58:01,680 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-05 00:58:01,681 main DEBUG createAppenders(={Console}) 2024-12-05 00:58:01,682 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 initialized 2024-12-05 00:58:01,682 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 2024-12-05 00:58:01,683 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 OK. 2024-12-05 00:58:01,683 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-05 00:58:01,684 main DEBUG OutputStream closed 2024-12-05 00:58:01,684 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-05 00:58:01,685 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-05 00:58:01,685 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@5a56cdac OK 2024-12-05 00:58:01,804 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-05 00:58:01,806 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-05 00:58:01,808 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-05 00:58:01,809 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-05 00:58:01,810 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-05 00:58:01,811 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-05 00:58:01,811 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-05 00:58:01,811 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-05 00:58:01,812 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-05 00:58:01,812 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-05 00:58:01,813 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-05 00:58:01,813 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-05 00:58:01,814 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-05 00:58:01,814 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-05 00:58:01,814 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-05 00:58:01,815 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-05 00:58:01,815 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-05 00:58:01,816 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-05 00:58:01,819 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-05 00:58:01,820 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6f63b475) with optional ClassLoader: null 2024-12-05 00:58:01,820 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-05 00:58:01,821 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6f63b475] started OK. 2024-12-05T00:58:01,840 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-12-05 00:58:01,844 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-05 00:58:01,844 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-05T00:58:02,366 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4 2024-12-05T00:58:02,367 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins 2024-12-05T00:58:02,425 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-12-05T00:58:02,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-05T00:58:02,696 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1, deleteOnExit=true 2024-12-05T00:58:02,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-05T00:58:02,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/test.cache.data in system properties and HBase conf 2024-12-05T00:58:02,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.tmp.dir in system properties and HBase conf 2024-12-05T00:58:02,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir in system properties and HBase conf 2024-12-05T00:58:02,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-05T00:58:02,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-05T00:58:02,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-05T00:58:02,830 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-05T00:58:02,840 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:58:02,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:58:02,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-05T00:58:02,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:58:02,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-05T00:58:02,852 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-05T00:58:02,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:58:02,854 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:58:02,855 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-05T00:58:02,855 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/nfs.dump.dir in system properties and HBase conf 2024-12-05T00:58:02,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/java.io.tmpdir in system properties and HBase conf 2024-12-05T00:58:02,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:58:02,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-05T00:58:02,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-05T00:58:04,083 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-05T00:58:04,192 INFO [Time-limited test {}] log.Log(170): Logging initialized @3743ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-05T00:58:04,306 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:58:04,415 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:58:04,466 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:58:04,466 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:58:04,468 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:58:04,488 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:58:04,498 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c3e3f70{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:58:04,500 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15dced0e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:58:04,782 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@287bbdb7{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/java.io.tmpdir/jetty-localhost-38475-hadoop-hdfs-3_4_1-tests_jar-_-any-9838479268374964753/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T00:58:04,791 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@33b00b71{HTTP/1.1, (http/1.1)}{localhost:38475} 2024-12-05T00:58:04,791 INFO [Time-limited test {}] server.Server(415): Started @4344ms 2024-12-05T00:58:05,265 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:58:05,272 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:58:05,273 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:58:05,274 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:58:05,274 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:58:05,275 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c897929{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:58:05,276 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bf4a906{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:58:05,410 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ae93e7a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/java.io.tmpdir/jetty-localhost-36959-hadoop-hdfs-3_4_1-tests_jar-_-any-876350668566126582/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:58:05,411 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5703ed84{HTTP/1.1, (http/1.1)}{localhost:36959} 2024-12-05T00:58:05,411 INFO [Time-limited test {}] server.Server(415): Started @4965ms 2024-12-05T00:58:05,476 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:58:05,649 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:58:05,660 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:58:05,671 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:58:05,671 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:58:05,671 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:58:05,676 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@621090f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:58:05,677 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c3f2c62{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:58:05,841 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76c02ba1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/java.io.tmpdir/jetty-localhost-37567-hadoop-hdfs-3_4_1-tests_jar-_-any-1313838354192751725/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:58:05,842 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7e7a95e6{HTTP/1.1, (http/1.1)}{localhost:37567} 2024-12-05T00:58:05,843 INFO [Time-limited test {}] server.Server(415): Started @5396ms 2024-12-05T00:58:05,846 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T00:58:05,912 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:58:05,921 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:58:05,926 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:58:05,927 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:58:05,927 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:58:05,928 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26bad6ee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:58:05,929 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6632e866{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T00:58:06,077 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77f27ab{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/java.io.tmpdir/jetty-localhost-37213-hadoop-hdfs-3_4_1-tests_jar-_-any-9822544372740163461/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T00:58:06,078 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@295c990e{HTTP/1.1, (http/1.1)}{localhost:37213} 2024-12-05T00:58:06,078 INFO [Time-limited test {}] server.Server(415): Started @5632ms 2024-12-05T00:58:06,082 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
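The startup traced from the "Starting up minicluster" line above through the three Jetty datanode web UIs corresponds to a test class bringing up a mini HDFS/HBase cluster with 1 master, 3 region servers, 3 datanodes and 1 ZooKeeper server before exercising snapshot export. A minimal sketch of that setup, assuming the branch-3 HBaseTestingUtil / StartMiniClusterOption API and a hypothetical test class name (TestExportSnapshot's real setup lives in the HBase source tree):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.AfterClass;
import org.junit.BeforeClass;

// Hypothetical skeleton mirroring the StartMiniClusterOption values logged above.
public class MiniClusterSketch {

  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1,
    // as reported by HBaseTestingUtil(805) in the log.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();
    TEST_UTIL.startMiniCluster(option);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Tears down HBase, DFS and ZooKeeper and deletes the test data directory.
    TEST_UTIL.shutdownMiniCluster();
  }
}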
2024-12-05T00:58:06,156 WARN [Thread-112 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data3/current/BP-1741140594-172.17.0.2-1733360283702/current, will proceed with Du for space computation calculation, 2024-12-05T00:58:06,156 WARN [Thread-113 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data1/current/BP-1741140594-172.17.0.2-1733360283702/current, will proceed with Du for space computation calculation, 2024-12-05T00:58:06,160 WARN [Thread-115 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data2/current/BP-1741140594-172.17.0.2-1733360283702/current, will proceed with Du for space computation calculation, 2024-12-05T00:58:06,160 WARN [Thread-114 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data4/current/BP-1741140594-172.17.0.2-1733360283702/current, will proceed with Du for space computation calculation, 2024-12-05T00:58:06,286 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T00:58:06,286 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T00:58:06,328 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data5/current/BP-1741140594-172.17.0.2-1733360283702/current, will proceed with Du for space computation calculation, 2024-12-05T00:58:06,338 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data6/current/BP-1741140594-172.17.0.2-1733360283702/current, will proceed with Du for space computation calculation, 2024-12-05T00:58:06,392 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb7806e09c046bfe7 with lease ID 0x262ee09e23cf3882: Processing first storage report for DS-8bafa01e-8f9d-46ad-945f-175cedad9809 from datanode DatanodeRegistration(127.0.0.1:41667, datanodeUuid=7abf6def-a180-4e0f-a930-192612f670ca, infoPort=34391, infoSecurePort=0, ipcPort=41813, storageInfo=lv=-57;cid=testClusterID;nsid=766971145;c=1733360283702) 2024-12-05T00:58:06,394 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7806e09c046bfe7 with lease ID 0x262ee09e23cf3882: from storage DS-8bafa01e-8f9d-46ad-945f-175cedad9809 node DatanodeRegistration(127.0.0.1:41667, datanodeUuid=7abf6def-a180-4e0f-a930-192612f670ca, infoPort=34391, infoSecurePort=0, ipcPort=41813, storageInfo=lv=-57;cid=testClusterID;nsid=766971145;c=1733360283702), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-05T00:58:06,394 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb7806e09c046bfe7 with lease ID 0x262ee09e23cf3882: Processing first storage report for DS-33b8c4d2-0983-4156-a6f5-56b98fa2d849 from datanode DatanodeRegistration(127.0.0.1:41667, datanodeUuid=7abf6def-a180-4e0f-a930-192612f670ca, infoPort=34391, infoSecurePort=0, ipcPort=41813, storageInfo=lv=-57;cid=testClusterID;nsid=766971145;c=1733360283702) 2024-12-05T00:58:06,395 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7806e09c046bfe7 with lease ID 0x262ee09e23cf3882: from storage DS-33b8c4d2-0983-4156-a6f5-56b98fa2d849 node DatanodeRegistration(127.0.0.1:41667, datanodeUuid=7abf6def-a180-4e0f-a930-192612f670ca, infoPort=34391, infoSecurePort=0, ipcPort=41813, storageInfo=lv=-57;cid=testClusterID;nsid=766971145;c=1733360283702), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-05T00:58:06,412 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x56eb3127eada504d with lease ID 0x262ee09e23cf3883: Processing first storage report for DS-ffcd1894-adb0-4c96-8de9-f5d2258e4ec3 from datanode DatanodeRegistration(127.0.0.1:37797, datanodeUuid=718b468a-c913-4cdb-9ac0-c9df2b7f06ef, infoPort=33543, infoSecurePort=0, ipcPort=39179, storageInfo=lv=-57;cid=testClusterID;nsid=766971145;c=1733360283702) 2024-12-05T00:58:06,412 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x56eb3127eada504d with lease ID 0x262ee09e23cf3883: from storage DS-ffcd1894-adb0-4c96-8de9-f5d2258e4ec3 node DatanodeRegistration(127.0.0.1:37797, 
datanodeUuid=718b468a-c913-4cdb-9ac0-c9df2b7f06ef, infoPort=33543, infoSecurePort=0, ipcPort=39179, storageInfo=lv=-57;cid=testClusterID;nsid=766971145;c=1733360283702), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:58:06,413 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x56eb3127eada504d with lease ID 0x262ee09e23cf3883: Processing first storage report for DS-7fb126da-f650-4091-a80b-f4fabd55196e from datanode DatanodeRegistration(127.0.0.1:37797, datanodeUuid=718b468a-c913-4cdb-9ac0-c9df2b7f06ef, infoPort=33543, infoSecurePort=0, ipcPort=39179, storageInfo=lv=-57;cid=testClusterID;nsid=766971145;c=1733360283702) 2024-12-05T00:58:06,413 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x56eb3127eada504d with lease ID 0x262ee09e23cf3883: from storage DS-7fb126da-f650-4091-a80b-f4fabd55196e node DatanodeRegistration(127.0.0.1:37797, datanodeUuid=718b468a-c913-4cdb-9ac0-c9df2b7f06ef, infoPort=33543, infoSecurePort=0, ipcPort=39179, storageInfo=lv=-57;cid=testClusterID;nsid=766971145;c=1733360283702), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:58:06,464 WARN [Thread-111 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T00:58:06,478 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc4289d76cd11a8ed with lease ID 0x262ee09e23cf3884: Processing first storage report for DS-8b9d97af-dfa8-4625-bfb7-06da988419bc from datanode DatanodeRegistration(127.0.0.1:46149, datanodeUuid=574b84e0-8ed8-4a2c-b3a3-bd6cf1f65bbb, infoPort=37943, infoSecurePort=0, ipcPort=42087, storageInfo=lv=-57;cid=testClusterID;nsid=766971145;c=1733360283702) 2024-12-05T00:58:06,479 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc4289d76cd11a8ed with lease ID 0x262ee09e23cf3884: from storage DS-8b9d97af-dfa8-4625-bfb7-06da988419bc node DatanodeRegistration(127.0.0.1:46149, datanodeUuid=574b84e0-8ed8-4a2c-b3a3-bd6cf1f65bbb, infoPort=37943, infoSecurePort=0, ipcPort=42087, storageInfo=lv=-57;cid=testClusterID;nsid=766971145;c=1733360283702), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:58:06,479 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc4289d76cd11a8ed with lease ID 0x262ee09e23cf3884: Processing first storage report for DS-28d23870-fed4-4487-b792-3b1f58bbe7d7 from datanode DatanodeRegistration(127.0.0.1:46149, datanodeUuid=574b84e0-8ed8-4a2c-b3a3-bd6cf1f65bbb, infoPort=37943, infoSecurePort=0, ipcPort=42087, storageInfo=lv=-57;cid=testClusterID;nsid=766971145;c=1733360283702) 2024-12-05T00:58:06,479 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc4289d76cd11a8ed with lease ID 0x262ee09e23cf3884: from storage DS-28d23870-fed4-4487-b792-3b1f58bbe7d7 node DatanodeRegistration(127.0.0.1:46149, datanodeUuid=574b84e0-8ed8-4a2c-b3a3-bd6cf1f65bbb, infoPort=37943, infoSecurePort=0, ipcPort=42087, storageInfo=lv=-57;cid=testClusterID;nsid=766971145;c=1733360283702), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T00:58:06,701 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4 2024-12-05T00:58:06,837 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/zookeeper_0, clientPort=56893, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-05T00:58:06,851 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56893 2024-12-05T00:58:06,868 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:58:06,873 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:58:07,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:58:07,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:58:07,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741825_1001 (size=7) 2024-12-05T00:58:07,668 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 with version=8 2024-12-05T00:58:07,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/hbase-staging 2024-12-05T00:58:07,766 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-05T00:58:08,033 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:58:08,044 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:58:08,045 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:58:08,050 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:58:08,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): 
Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:58:08,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:58:08,229 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-05T00:58:08,308 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-05T00:58:08,318 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-05T00:58:08,322 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:58:08,355 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 9215 (auto-detected) 2024-12-05T00:58:08,356 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-05T00:58:08,387 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33369 2024-12-05T00:58:08,419 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33369 connecting to ZooKeeper ensemble=127.0.0.1:56893 2024-12-05T00:58:08,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:333690x0, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:58:08,471 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33369-0x1018020e6da0000 connected 2024-12-05T00:58:08,546 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:58:08,553 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:58:08,571 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:58:08,576 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11, hbase.cluster.distributed=false 2024-12-05T00:58:08,615 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:58:08,623 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33369 2024-12-05T00:58:08,624 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33369 2024-12-05T00:58:08,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, 
numCallQueues=1, port=33369 2024-12-05T00:58:08,630 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33369 2024-12-05T00:58:08,631 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33369 2024-12-05T00:58:08,758 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:58:08,761 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:58:08,761 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:58:08,761 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:58:08,761 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:58:08,762 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:58:08,765 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T00:58:08,768 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:58:08,773 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32927 2024-12-05T00:58:08,775 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32927 connecting to ZooKeeper ensemble=127.0.0.1:56893 2024-12-05T00:58:08,777 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:58:08,781 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:58:08,791 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:329270x0, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:58:08,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:329270x0, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:58:08,796 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T00:58:08,801 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32927-0x1018020e6da0001 connected 2024-12-05T00:58:08,810 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, 
evictRemainRatio=0.5 2024-12-05T00:58:08,813 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T00:58:08,820 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:58:08,825 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32927 2024-12-05T00:58:08,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32927 2024-12-05T00:58:08,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32927 2024-12-05T00:58:08,830 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32927 2024-12-05T00:58:08,835 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32927 2024-12-05T00:58:08,855 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:58:08,856 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:58:08,856 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:58:08,856 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:58:08,857 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:58:08,857 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:58:08,857 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T00:58:08,858 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:58:08,859 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44435 2024-12-05T00:58:08,861 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44435 connecting to ZooKeeper ensemble=127.0.0.1:56893 2024-12-05T00:58:08,862 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:58:08,865 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do 
block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:58:08,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:444350x0, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:58:08,878 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44435-0x1018020e6da0002 connected 2024-12-05T00:58:08,878 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:58:08,879 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T00:58:08,880 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T00:58:08,881 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T00:58:08,883 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:58:08,886 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44435 2024-12-05T00:58:08,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44435 2024-12-05T00:58:08,889 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44435 2024-12-05T00:58:08,890 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44435 2024-12-05T00:58:08,890 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44435 2024-12-05T00:58:08,911 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2113c16e5528:0 server-side Connection retries=45 2024-12-05T00:58:08,911 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:58:08,912 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T00:58:08,912 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T00:58:08,912 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T00:58:08,912 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T00:58:08,913 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T00:58:08,913 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T00:58:08,914 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41191 2024-12-05T00:58:08,916 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41191 connecting to ZooKeeper ensemble=127.0.0.1:56893 2024-12-05T00:58:08,917 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:58:08,919 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:58:08,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:411910x0, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T00:58:08,926 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:411910x0, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T00:58:08,927 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T00:58:08,928 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41191-0x1018020e6da0003 connected 2024-12-05T00:58:08,932 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T00:58:08,933 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T00:58:08,935 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T00:58:08,936 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41191 2024-12-05T00:58:08,938 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41191 2024-12-05T00:58:08,939 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41191 2024-12-05T00:58:08,941 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41191 2024-12-05T00:58:08,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41191 2024-12-05T00:58:08,960 DEBUG [M:0;2113c16e5528:33369 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2113c16e5528:33369 2024-12-05T00:58:08,961 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2113c16e5528,33369,1733360287836 2024-12-05T00:58:08,970 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:58:08,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:58:08,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:58:08,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:58:08,974 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2113c16e5528,33369,1733360287836 2024-12-05T00:58:08,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T00:58:08,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T00:58:08,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T00:58:08,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:08,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:08,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:08,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:09,001 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T00:58:09,003 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2113c16e5528,33369,1733360287836 from backup master directory 2024-12-05T00:58:09,006 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2113c16e5528,33369,1733360287836 2024-12-05T00:58:09,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:58:09,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:58:09,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:58:09,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T00:58:09,008 WARN [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T00:58:09,008 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2113c16e5528,33369,1733360287836 2024-12-05T00:58:09,010 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-05T00:58:09,012 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-05T00:58:09,078 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/hbase.id] with ID: e24f6832-226f-46b1-8a64-09e1d59fa392 2024-12-05T00:58:09,078 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.tmp/hbase.id 2024-12-05T00:58:09,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:58:09,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:58:09,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741826_1002 (size=42) 2024-12-05T00:58:09,104 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.tmp/hbase.id]:[hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/hbase.id] 2024-12-05T00:58:09,163 INFO [master/2113c16e5528:0:becomeActiveMaster {}] fs.HFileSystem(339): Added 
intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:58:09,169 INFO [master/2113c16e5528:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T00:58:09,189 INFO [master/2113c16e5528:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-12-05T00:58:09,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:09,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:09,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:09,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:09,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:58:09,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:58:09,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741827_1003 (size=196) 2024-12-05T00:58:09,239 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:58:09,241 INFO 
[master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T00:58:09,260 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T00:58:09,264 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T00:58:09,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:58:09,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:58:09,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741828_1004 (size=1189) 2024-12-05T00:58:09,329 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData/data/master/store 2024-12-05T00:58:09,346 WARN [IPC Server handler 4 on default port 37149 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-05T00:58:09,346 WARN [IPC Server handler 4 on default port 37149 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-05T00:58:09,346 WARN [IPC Server handler 4 on default port 37149 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 
(unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-05T00:58:09,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:58:09,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:58:09,366 INFO [master/2113c16e5528:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-05T00:58:09,370 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:58:09,373 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T00:58:09,373 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:58:09,374 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:58:09,376 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T00:58:09,376 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T00:58:09,376 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T00:58:09,378 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733360289373Disabling compacts and flushes for region at 1733360289373Disabling writes for close at 1733360289376 (+3 ms)Writing region close event to WAL at 1733360289376Closed at 1733360289376 2024-12-05T00:58:09,381 WARN [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData/data/master/store/.initializing 2024-12-05T00:58:09,381 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData/WALs/2113c16e5528,33369,1733360287836 2024-12-05T00:58:09,392 INFO [master/2113c16e5528:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T00:58:09,409 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C33369%2C1733360287836, suffix=, logDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData/WALs/2113c16e5528,33369,1733360287836, archiveDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData/oldWALs, maxLogs=10 2024-12-05T00:58:09,434 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData/WALs/2113c16e5528,33369,1733360287836/2113c16e5528%2C33369%2C1733360287836.1733360289414, exclude list is [], retry=0 2024-12-05T00:58:09,438 WARN [IPC Server handler 2 on default port 37149 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-05T00:58:09,438 WARN [IPC Server handler 2 on default port 37149 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-05T00:58:09,438 WARN [IPC Server handler 2 on default port 37149 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-05T00:58:09,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:46149,DS-8b9d97af-dfa8-4625-bfb7-06da988419bc,DISK] 2024-12-05T00:58:09,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41667,DS-8bafa01e-8f9d-46ad-945f-175cedad9809,DISK] 2024-12-05T00:58:09,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-05T00:58:09,503 INFO [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData/WALs/2113c16e5528,33369,1733360287836/2113c16e5528%2C33369%2C1733360287836.1733360289414 2024-12-05T00:58:09,504 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37943:37943),(127.0.0.1/127.0.0.1:34391:34391)] 2024-12-05T00:58:09,505 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:58:09,505 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:58:09,509 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:58:09,510 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:58:09,557 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:58:09,594 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T00:58:09,598 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:09,601 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:58:09,602 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:58:09,605 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T00:58:09,605 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:09,606 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:58:09,606 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:58:09,616 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T00:58:09,616 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:09,617 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:58:09,617 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:58:09,620 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T00:58:09,620 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:09,621 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:58:09,621 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:58:09,626 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:58:09,628 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:58:09,634 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:58:09,634 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:58:09,638 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-05T00:58:09,643 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T00:58:09,654 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:58:09,655 INFO [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71334419, jitterRate=0.06296567618846893}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T00:58:09,664 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733360289530Initializing all the Stores at 1733360289532 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733360289533 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360289533Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360289534 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360289534Cleaning up temporary data from old regions at 1733360289634 (+100 ms)Region opened successfully at 1733360289664 (+30 ms) 2024-12-05T00:58:09,666 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T00:58:09,725 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78b4dcf5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:58:09,762 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-12-05T00:58:09,775 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T00:58:09,775 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T00:58:09,778 INFO [master/2113c16e5528:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T00:58:09,779 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-05T00:58:09,784 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-05T00:58:09,785 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T00:58:09,819 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-05T00:58:09,830 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T00:58:09,833 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T00:58:09,837 INFO [master/2113c16e5528:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T00:58:09,839 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T00:58:09,841 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T00:58:09,844 INFO [master/2113c16e5528:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T00:58:09,847 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T00:58:09,849 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T00:58:09,852 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T00:58:09,856 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T00:58:09,876 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T00:58:09,878 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T00:58:09,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:58:09,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:58:09,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:09,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:58:09,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:09,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T00:58:09,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:09,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:09,888 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2113c16e5528,33369,1733360287836, sessionid=0x1018020e6da0000, setting cluster-up flag (Was=false) 2024-12-05T00:58:09,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:09,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:09,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:09,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-05T00:58:09,911 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T00:58:09,914 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2113c16e5528,33369,1733360287836 2024-12-05T00:58:09,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:09,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:09,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:09,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:09,931 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T00:58:09,933 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2113c16e5528,33369,1733360287836 2024-12-05T00:58:09,941 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T00:58:09,948 INFO [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(746): ClusterId : e24f6832-226f-46b1-8a64-09e1d59fa392 2024-12-05T00:58:09,948 INFO [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(746): ClusterId : e24f6832-226f-46b1-8a64-09e1d59fa392 2024-12-05T00:58:09,948 INFO [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(746): ClusterId : e24f6832-226f-46b1-8a64-09e1d59fa392 2024-12-05T00:58:09,951 DEBUG [RS:2;2113c16e5528:41191 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T00:58:09,951 DEBUG [RS:1;2113c16e5528:44435 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T00:58:09,951 DEBUG [RS:0;2113c16e5528:32927 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T00:58:09,957 DEBUG [RS:2;2113c16e5528:41191 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T00:58:09,957 DEBUG [RS:1;2113c16e5528:44435 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T00:58:09,957 DEBUG [RS:1;2113c16e5528:44435 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T00:58:09,957 DEBUG [RS:2;2113c16e5528:41191 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T00:58:09,959 DEBUG [RS:0;2113c16e5528:32927 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T00:58:09,959 DEBUG [RS:0;2113c16e5528:32927 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T00:58:09,961 DEBUG [RS:1;2113c16e5528:44435 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T00:58:09,962 DEBUG [RS:1;2113c16e5528:44435 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3857a95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:58:09,968 DEBUG [RS:2;2113c16e5528:41191 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T00:58:09,970 DEBUG [RS:0;2113c16e5528:32927 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T00:58:09,970 DEBUG [RS:0;2113c16e5528:32927 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fff8f29, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:58:09,971 DEBUG [RS:2;2113c16e5528:41191 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49082e8b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2113c16e5528/172.17.0.2:0 2024-12-05T00:58:09,982 DEBUG [RS:1;2113c16e5528:44435 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;2113c16e5528:44435 2024-12-05T00:58:09,987 INFO [RS:1;2113c16e5528:44435 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T00:58:09,987 INFO [RS:1;2113c16e5528:44435 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T00:58:09,991 DEBUG [RS:0;2113c16e5528:32927 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2113c16e5528:32927 2024-12-05T00:58:09,991 INFO [RS:0;2113c16e5528:32927 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T00:58:09,991 INFO [RS:0;2113c16e5528:32927 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T00:58:09,999 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-12-05T00:58:09,999 DEBUG [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-05T00:58:10,000 INFO [RS:1;2113c16e5528:44435 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T00:58:10,001 DEBUG [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-05T00:58:10,004 INFO [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(2659): reportForDuty to master=2113c16e5528,33369,1733360287836 with port=44435, startcode=1733360288855 2024-12-05T00:58:10,005 DEBUG [RS:2;2113c16e5528:41191 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;2113c16e5528:41191 2024-12-05T00:58:10,005 INFO [RS:2;2113c16e5528:41191 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T00:58:10,005 INFO [RS:2;2113c16e5528:41191 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T00:58:10,005 DEBUG [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-05T00:58:10,005 DEBUG [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-05T00:58:10,006 INFO [RS:0;2113c16e5528:32927 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T00:58:10,006 INFO [RS:2;2113c16e5528:41191 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T00:58:10,006 DEBUG [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T00:58:10,006 DEBUG [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T00:58:10,007 INFO [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(2659): reportForDuty to master=2113c16e5528,33369,1733360287836 with port=41191, startcode=1733360288910 2024-12-05T00:58:10,009 INFO [master/2113c16e5528:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T00:58:10,009 INFO [master/2113c16e5528:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
2024-12-05T00:58:10,021 DEBUG [RS:2;2113c16e5528:41191 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T00:58:10,023 INFO [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(2659): reportForDuty to master=2113c16e5528,33369,1733360287836 with port=32927, startcode=1733360288712 2024-12-05T00:58:10,024 DEBUG [RS:0;2113c16e5528:32927 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T00:58:10,025 DEBUG [RS:1;2113c16e5528:44435 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T00:58:10,101 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54797, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T00:58:10,101 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32913, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T00:58:10,102 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57329, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T00:58:10,110 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33369 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-05T00:58:10,117 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T00:58:10,118 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33369 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-05T00:58:10,119 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33369 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-05T00:58:10,134 INFO [master/2113c16e5528:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T00:58:10,145 INFO [master/2113c16e5528:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-05T00:58:10,152 DEBUG [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-05T00:58:10,152 DEBUG [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-05T00:58:10,152 DEBUG [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-05T00:58:10,153 WARN [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-05T00:58:10,153 WARN [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-05T00:58:10,153 WARN [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-12-05T00:58:10,154 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2113c16e5528,33369,1733360287836 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T00:58:10,170 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:58:10,170 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:58:10,171 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:58:10,171 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2113c16e5528:0, corePoolSize=5, maxPoolSize=5 2024-12-05T00:58:10,171 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2113c16e5528:0, corePoolSize=10, maxPoolSize=10 2024-12-05T00:58:10,171 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,171 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:58:10,171 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,186 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:58:10,187 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T00:58:10,195 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:10,195 INFO [PEWorker-2 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T00:58:10,200 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733360320200 2024-12-05T00:58:10,202 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T00:58:10,203 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T00:58:10,210 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T00:58:10,210 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T00:58:10,211 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T00:58:10,211 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T00:58:10,216 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-05T00:58:10,225 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T00:58:10,226 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T00:58:10,227 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T00:58:10,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:58:10,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:58:10,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741831_1007 (size=1321) 2024-12-05T00:58:10,232 INFO [PEWorker-2 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T00:58:10,232 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T00:58:10,233 INFO [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T00:58:10,233 INFO [PEWorker-2 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T00:58:10,235 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting 
for large file=Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733360290235,5,FailOnTimeoutGroup] 2024-12-05T00:58:10,236 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733360290236,5,FailOnTimeoutGroup] 2024-12-05T00:58:10,236 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,237 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T00:58:10,238 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,238 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,254 INFO [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(2659): reportForDuty to master=2113c16e5528,33369,1733360287836 with port=41191, startcode=1733360288910 2024-12-05T00:58:10,254 INFO [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(2659): reportForDuty to master=2113c16e5528,33369,1733360287836 with port=44435, startcode=1733360288855 2024-12-05T00:58:10,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:58:10,255 INFO [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(2659): reportForDuty to master=2113c16e5528,33369,1733360287836 with port=32927, startcode=1733360288712 2024-12-05T00:58:10,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:58:10,257 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33369 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2113c16e5528,41191,1733360288910 2024-12-05T00:58:10,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741832_1008 (size=32) 2024-12-05T00:58:10,259 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33369 {}] master.ServerManager(517): Registering regionserver=2113c16e5528,41191,1733360288910 2024-12-05T00:58:10,265 DEBUG [PEWorker-2 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:58:10,268 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:58:10,268 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33369 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2113c16e5528,44435,1733360288855 2024-12-05T00:58:10,268 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33369 {}] master.ServerManager(517): 
Registering regionserver=2113c16e5528,44435,1733360288855 2024-12-05T00:58:10,269 DEBUG [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T00:58:10,269 DEBUG [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37149 2024-12-05T00:58:10,269 DEBUG [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T00:58:10,275 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:58:10,275 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:10,275 DEBUG [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T00:58:10,275 DEBUG [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37149 2024-12-05T00:58:10,275 DEBUG [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T00:58:10,275 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33369 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2113c16e5528,32927,1733360288712 2024-12-05T00:58:10,276 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33369 {}] master.ServerManager(517): Registering regionserver=2113c16e5528,32927,1733360288712 2024-12-05T00:58:10,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:58:10,276 DEBUG [RS:2;2113c16e5528:41191 {}] zookeeper.ZKUtil(111): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2113c16e5528,41191,1733360288910 2024-12-05T00:58:10,277 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:58:10,277 WARN [RS:2;2113c16e5528:41191 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-05T00:58:10,277 INFO [RS:2;2113c16e5528:41191 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T00:58:10,277 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:58:10,277 DEBUG [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,41191,1733360288910 2024-12-05T00:58:10,281 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:58:10,282 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:10,282 DEBUG [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T00:58:10,282 DEBUG [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37149 2024-12-05T00:58:10,282 DEBUG [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T00:58:10,283 DEBUG [RS:1;2113c16e5528:44435 {}] zookeeper.ZKUtil(111): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2113c16e5528,44435,1733360288855 2024-12-05T00:58:10,284 WARN [RS:1;2113c16e5528:44435 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-05T00:58:10,284 INFO [RS:1;2113c16e5528:44435 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T00:58:10,284 DEBUG [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,44435,1733360288855 2024-12-05T00:58:10,285 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:58:10,285 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:58:10,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T00:58:10,287 DEBUG [RS:0;2113c16e5528:32927 {}] zookeeper.ZKUtil(111): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2113c16e5528,32927,1733360288712 2024-12-05T00:58:10,287 WARN [RS:0;2113c16e5528:32927 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T00:58:10,287 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2113c16e5528,41191,1733360288910] 2024-12-05T00:58:10,287 INFO [RS:0;2113c16e5528:32927 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T00:58:10,287 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2113c16e5528,44435,1733360288855] 2024-12-05T00:58:10,287 DEBUG [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,32927,1733360288712 2024-12-05T00:58:10,289 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2113c16e5528,32927,1733360288712] 2024-12-05T00:58:10,292 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:58:10,292 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:10,293 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:58:10,293 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:58:10,298 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:58:10,298 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:10,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:58:10,300 DEBUG [PEWorker-2 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:58:10,302 DEBUG [PEWorker-2 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740 2024-12-05T00:58:10,303 DEBUG [PEWorker-2 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740 2024-12-05T00:58:10,308 DEBUG [PEWorker-2 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:58:10,308 DEBUG [PEWorker-2 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:58:10,310 DEBUG [PEWorker-2 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-05T00:58:10,314 DEBUG [PEWorker-2 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:58:10,321 INFO [RS:1;2113c16e5528:44435 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T00:58:10,338 INFO [RS:2;2113c16e5528:41191 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T00:58:10,340 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:58:10,343 INFO [PEWorker-2 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59686885, jitterRate=-0.11059610545635223}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T00:58:10,347 DEBUG [PEWorker-2 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733360290265Initializing all the Stores at 1733360290267 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733360290267Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733360290268 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360290268Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733360290268Cleaning up temporary data from old regions at 1733360290308 (+40 ms)Region opened successfully at 1733360290347 (+39 ms) 2024-12-05T00:58:10,347 DEBUG [PEWorker-2 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T00:58:10,347 INFO [PEWorker-2 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T00:58:10,347 DEBUG [PEWorker-2 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T00:58:10,348 INFO [RS:0;2113c16e5528:32927 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T00:58:10,348 DEBUG [PEWorker-2 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 
ms 2024-12-05T00:58:10,348 DEBUG [PEWorker-2 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T00:58:10,354 INFO [RS:2;2113c16e5528:41191 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T00:58:10,354 INFO [RS:1;2113c16e5528:44435 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T00:58:10,361 INFO [RS:0;2113c16e5528:32927 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T00:58:10,361 INFO [PEWorker-2 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T00:58:10,362 DEBUG [PEWorker-2 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733360290347Disabling compacts and flushes for region at 1733360290347Disabling writes for close at 1733360290348 (+1 ms)Writing region close event to WAL at 1733360290361 (+13 ms)Closed at 1733360290361 2024-12-05T00:58:10,366 INFO [RS:1;2113c16e5528:44435 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T00:58:10,366 INFO [RS:1;2113c16e5528:44435 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,367 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:58:10,367 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T00:58:10,373 INFO [RS:2;2113c16e5528:41191 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T00:58:10,373 INFO [RS:2;2113c16e5528:41191 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,377 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T00:58:10,386 INFO [RS:0;2113c16e5528:32927 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T00:58:10,386 INFO [RS:0;2113c16e5528:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-05T00:58:10,391 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:58:10,395 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T00:58:10,398 INFO [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T00:58:10,398 INFO [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T00:58:10,399 INFO [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T00:58:10,405 INFO [RS:0;2113c16e5528:32927 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T00:58:10,405 INFO [RS:1;2113c16e5528:44435 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T00:58:10,406 INFO [RS:2;2113c16e5528:41191 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T00:58:10,407 INFO [RS:0;2113c16e5528:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,407 INFO [RS:2;2113c16e5528:41191 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,407 DEBUG [RS:0;2113c16e5528:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,407 DEBUG [RS:2;2113c16e5528:41191 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,407 DEBUG [RS:0;2113c16e5528:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,407 INFO [RS:1;2113c16e5528:44435 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-05T00:58:10,408 DEBUG [RS:0;2113c16e5528:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,408 DEBUG [RS:1;2113c16e5528:44435 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,408 DEBUG [RS:0;2113c16e5528:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,408 DEBUG [RS:2;2113c16e5528:41191 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,408 DEBUG [RS:2;2113c16e5528:41191 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,408 DEBUG [RS:2;2113c16e5528:41191 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,408 DEBUG [RS:2;2113c16e5528:41191 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,409 DEBUG [RS:2;2113c16e5528:41191 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:58:10,409 DEBUG [RS:2;2113c16e5528:41191 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,409 DEBUG [RS:2;2113c16e5528:41191 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,409 DEBUG [RS:1;2113c16e5528:44435 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,409 DEBUG [RS:0;2113c16e5528:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,409 DEBUG [RS:1;2113c16e5528:44435 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,409 DEBUG [RS:0;2113c16e5528:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:58:10,409 DEBUG [RS:1;2113c16e5528:44435 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,409 DEBUG [RS:0;2113c16e5528:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,409 DEBUG [RS:0;2113c16e5528:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,409 DEBUG [RS:1;2113c16e5528:44435 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2113c16e5528:0, 
corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,410 DEBUG [RS:0;2113c16e5528:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,410 DEBUG [RS:1;2113c16e5528:44435 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2113c16e5528:0, corePoolSize=2, maxPoolSize=2 2024-12-05T00:58:10,410 DEBUG [RS:0;2113c16e5528:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,410 DEBUG [RS:1;2113c16e5528:44435 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,410 DEBUG [RS:0;2113c16e5528:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,410 DEBUG [RS:1;2113c16e5528:44435 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,410 DEBUG [RS:1;2113c16e5528:44435 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,410 DEBUG [RS:0;2113c16e5528:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,410 DEBUG [RS:1;2113c16e5528:44435 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,410 DEBUG [RS:0;2113c16e5528:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:58:10,410 DEBUG [RS:1;2113c16e5528:44435 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,410 DEBUG [RS:0;2113c16e5528:32927 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:58:10,411 DEBUG [RS:1;2113c16e5528:44435 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,411 DEBUG [RS:1;2113c16e5528:44435 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:58:10,411 DEBUG [RS:2;2113c16e5528:41191 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,411 DEBUG [RS:1;2113c16e5528:44435 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:58:10,411 DEBUG [RS:2;2113c16e5528:41191 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,411 DEBUG [RS:2;2113c16e5528:41191 {}] 
executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,412 DEBUG [RS:2;2113c16e5528:41191 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2113c16e5528:0, corePoolSize=1, maxPoolSize=1 2024-12-05T00:58:10,412 DEBUG [RS:2;2113c16e5528:41191 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:58:10,413 DEBUG [RS:2;2113c16e5528:41191 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2113c16e5528:0, corePoolSize=3, maxPoolSize=3 2024-12-05T00:58:10,428 INFO [RS:0;2113c16e5528:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,429 INFO [RS:0;2113c16e5528:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,429 INFO [RS:0;2113c16e5528:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,429 INFO [RS:0;2113c16e5528:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,429 INFO [RS:0;2113c16e5528:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,429 INFO [RS:0;2113c16e5528:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,32927,1733360288712-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:58:10,438 INFO [RS:1;2113c16e5528:44435 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,438 INFO [RS:2;2113c16e5528:41191 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,438 INFO [RS:1;2113c16e5528:44435 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,438 INFO [RS:2;2113c16e5528:41191 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,439 INFO [RS:1;2113c16e5528:44435 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,439 INFO [RS:2;2113c16e5528:41191 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,439 INFO [RS:1;2113c16e5528:44435 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,439 INFO [RS:1;2113c16e5528:44435 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,439 INFO [RS:2;2113c16e5528:41191 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-05T00:58:10,439 INFO [RS:1;2113c16e5528:44435 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,44435,1733360288855-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:58:10,439 INFO [RS:2;2113c16e5528:41191 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,439 INFO [RS:2;2113c16e5528:41191 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,41191,1733360288910-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:58:10,476 INFO [RS:0;2113c16e5528:32927 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T00:58:10,479 INFO [RS:0;2113c16e5528:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,32927,1733360288712-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,480 INFO [RS:0;2113c16e5528:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,480 INFO [RS:0;2113c16e5528:32927 {}] regionserver.Replication(171): 2113c16e5528,32927,1733360288712 started 2024-12-05T00:58:10,492 INFO [RS:1;2113c16e5528:44435 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T00:58:10,493 INFO [RS:1;2113c16e5528:44435 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,44435,1733360288855-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,494 INFO [RS:1;2113c16e5528:44435 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,494 INFO [RS:1;2113c16e5528:44435 {}] regionserver.Replication(171): 2113c16e5528,44435,1733360288855 started 2024-12-05T00:58:10,496 INFO [RS:2;2113c16e5528:41191 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T00:58:10,496 INFO [RS:2;2113c16e5528:41191 {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,41191,1733360288910-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,496 INFO [RS:2;2113c16e5528:41191 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,496 INFO [RS:2;2113c16e5528:41191 {}] regionserver.Replication(171): 2113c16e5528,41191,1733360288910 started 2024-12-05T00:58:10,515 INFO [RS:0;2113c16e5528:32927 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T00:58:10,516 INFO [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(1482): Serving as 2113c16e5528,32927,1733360288712, RpcServer on 2113c16e5528/172.17.0.2:32927, sessionid=0x1018020e6da0001 2024-12-05T00:58:10,517 DEBUG [RS:0;2113c16e5528:32927 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T00:58:10,517 DEBUG [RS:0;2113c16e5528:32927 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2113c16e5528,32927,1733360288712 2024-12-05T00:58:10,517 DEBUG [RS:0;2113c16e5528:32927 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,32927,1733360288712' 2024-12-05T00:58:10,518 DEBUG [RS:0;2113c16e5528:32927 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T00:58:10,519 DEBUG [RS:0;2113c16e5528:32927 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T00:58:10,520 DEBUG [RS:0;2113c16e5528:32927 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T00:58:10,520 DEBUG [RS:0;2113c16e5528:32927 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T00:58:10,520 DEBUG [RS:0;2113c16e5528:32927 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2113c16e5528,32927,1733360288712 2024-12-05T00:58:10,520 DEBUG [RS:0;2113c16e5528:32927 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,32927,1733360288712' 2024-12-05T00:58:10,520 DEBUG [RS:0;2113c16e5528:32927 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T00:58:10,521 DEBUG [RS:0;2113c16e5528:32927 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T00:58:10,522 DEBUG [RS:0;2113c16e5528:32927 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T00:58:10,522 INFO [RS:0;2113c16e5528:32927 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T00:58:10,523 INFO [RS:0;2113c16e5528:32927 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T00:58:10,524 INFO [RS:2;2113c16e5528:41191 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T00:58:10,524 INFO [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(1482): Serving as 2113c16e5528,41191,1733360288910, RpcServer on 2113c16e5528/172.17.0.2:41191, sessionid=0x1018020e6da0003 2024-12-05T00:58:10,524 DEBUG [RS:2;2113c16e5528:41191 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T00:58:10,524 DEBUG [RS:2;2113c16e5528:41191 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2113c16e5528,41191,1733360288910 2024-12-05T00:58:10,525 DEBUG [RS:2;2113c16e5528:41191 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,41191,1733360288910' 2024-12-05T00:58:10,525 DEBUG [RS:2;2113c16e5528:41191 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T00:58:10,528 INFO [RS:1;2113c16e5528:44435 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:10,529 INFO [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(1482): Serving as 2113c16e5528,44435,1733360288855, RpcServer on 2113c16e5528/172.17.0.2:44435, sessionid=0x1018020e6da0002 2024-12-05T00:58:10,529 DEBUG [RS:1;2113c16e5528:44435 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T00:58:10,529 DEBUG [RS:1;2113c16e5528:44435 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2113c16e5528,44435,1733360288855 2024-12-05T00:58:10,529 DEBUG [RS:1;2113c16e5528:44435 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,44435,1733360288855' 2024-12-05T00:58:10,531 DEBUG [RS:1;2113c16e5528:44435 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T00:58:10,528 DEBUG [RS:2;2113c16e5528:41191 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T00:58:10,532 DEBUG [RS:1;2113c16e5528:44435 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T00:58:10,533 DEBUG [RS:1;2113c16e5528:44435 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T00:58:10,533 DEBUG [RS:1;2113c16e5528:44435 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T00:58:10,533 DEBUG [RS:1;2113c16e5528:44435 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2113c16e5528,44435,1733360288855 2024-12-05T00:58:10,533 DEBUG [RS:1;2113c16e5528:44435 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,44435,1733360288855' 2024-12-05T00:58:10,534 DEBUG [RS:1;2113c16e5528:44435 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T00:58:10,535 DEBUG [RS:2;2113c16e5528:41191 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T00:58:10,535 DEBUG [RS:2;2113c16e5528:41191 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T00:58:10,535 DEBUG [RS:2;2113c16e5528:41191 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2113c16e5528,41191,1733360288910 2024-12-05T00:58:10,535 DEBUG [RS:2;2113c16e5528:41191 {}] 
procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2113c16e5528,41191,1733360288910' 2024-12-05T00:58:10,535 DEBUG [RS:2;2113c16e5528:41191 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T00:58:10,536 DEBUG [RS:1;2113c16e5528:44435 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T00:58:10,536 DEBUG [RS:2;2113c16e5528:41191 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T00:58:10,537 DEBUG [RS:1;2113c16e5528:44435 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T00:58:10,537 DEBUG [RS:2;2113c16e5528:41191 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T00:58:10,537 INFO [RS:1;2113c16e5528:44435 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T00:58:10,537 INFO [RS:2;2113c16e5528:41191 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T00:58:10,537 INFO [RS:1;2113c16e5528:44435 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T00:58:10,537 INFO [RS:2;2113c16e5528:41191 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T00:58:10,547 WARN [2113c16e5528:33369 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-05T00:58:10,631 INFO [RS:0;2113c16e5528:32927 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T00:58:10,634 INFO [RS:0;2113c16e5528:32927 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C32927%2C1733360288712, suffix=, logDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,32927,1733360288712, archiveDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/oldWALs, maxLogs=32 2024-12-05T00:58:10,638 INFO [RS:2;2113c16e5528:41191 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T00:58:10,638 INFO [RS:1;2113c16e5528:44435 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T00:58:10,647 INFO [RS:1;2113c16e5528:44435 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C44435%2C1733360288855, suffix=, logDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,44435,1733360288855, archiveDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/oldWALs, maxLogs=32 2024-12-05T00:58:10,651 INFO [RS:2;2113c16e5528:41191 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C41191%2C1733360288910, suffix=, logDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,41191,1733360288910, archiveDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/oldWALs, maxLogs=32 2024-12-05T00:58:10,666 DEBUG [RS:0;2113c16e5528:32927 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,32927,1733360288712/2113c16e5528%2C32927%2C1733360288712.1733360290637, exclude list is [], retry=0 2024-12-05T00:58:10,674 DEBUG [RS:1;2113c16e5528:44435 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,44435,1733360288855/2113c16e5528%2C44435%2C1733360288855.1733360290649, exclude list is [], retry=0 2024-12-05T00:58:10,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41667,DS-8bafa01e-8f9d-46ad-945f-175cedad9809,DISK] 2024-12-05T00:58:10,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46149,DS-8b9d97af-dfa8-4625-bfb7-06da988419bc,DISK] 2024-12-05T00:58:10,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37797,DS-ffcd1894-adb0-4c96-8de9-f5d2258e4ec3,DISK] 2024-12-05T00:58:10,680 DEBUG [RS:2;2113c16e5528:41191 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,41191,1733360288910/2113c16e5528%2C41191%2C1733360288910.1733360290654, exclude list is [], retry=0 2024-12-05T00:58:10,681 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46149,DS-8b9d97af-dfa8-4625-bfb7-06da988419bc,DISK] 2024-12-05T00:58:10,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41667,DS-8bafa01e-8f9d-46ad-945f-175cedad9809,DISK] 2024-12-05T00:58:10,684 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37797,DS-ffcd1894-adb0-4c96-8de9-f5d2258e4ec3,DISK] 2024-12-05T00:58:10,686 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37797,DS-ffcd1894-adb0-4c96-8de9-f5d2258e4ec3,DISK] 2024-12-05T00:58:10,686 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46149,DS-8b9d97af-dfa8-4625-bfb7-06da988419bc,DISK] 2024-12-05T00:58:10,687 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL 
client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41667,DS-8bafa01e-8f9d-46ad-945f-175cedad9809,DISK] 2024-12-05T00:58:10,756 INFO [RS:1;2113c16e5528:44435 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,44435,1733360288855/2113c16e5528%2C44435%2C1733360288855.1733360290649 2024-12-05T00:58:10,756 INFO [RS:0;2113c16e5528:32927 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,32927,1733360288712/2113c16e5528%2C32927%2C1733360288712.1733360290637 2024-12-05T00:58:10,758 DEBUG [RS:0;2113c16e5528:32927 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34391:34391),(127.0.0.1/127.0.0.1:33543:33543),(127.0.0.1/127.0.0.1:37943:37943)] 2024-12-05T00:58:10,759 DEBUG [RS:1;2113c16e5528:44435 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34391:34391),(127.0.0.1/127.0.0.1:37943:37943),(127.0.0.1/127.0.0.1:33543:33543)] 2024-12-05T00:58:10,759 INFO [RS:2;2113c16e5528:41191 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,41191,1733360288910/2113c16e5528%2C41191%2C1733360288910.1733360290654 2024-12-05T00:58:10,759 DEBUG [RS:2;2113c16e5528:41191 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33543:33543),(127.0.0.1/127.0.0.1:37943:37943),(127.0.0.1/127.0.0.1:34391:34391)] 2024-12-05T00:58:10,801 DEBUG [2113c16e5528:33369 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-05T00:58:10,821 DEBUG [2113c16e5528:33369 {}] balancer.BalancerClusterState(204): Hosts are {2113c16e5528=0} racks are {/default-rack=0} 2024-12-05T00:58:10,830 DEBUG [2113c16e5528:33369 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T00:58:10,830 DEBUG [2113c16e5528:33369 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T00:58:10,830 DEBUG [2113c16e5528:33369 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T00:58:10,830 DEBUG [2113c16e5528:33369 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T00:58:10,830 DEBUG [2113c16e5528:33369 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T00:58:10,830 DEBUG [2113c16e5528:33369 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T00:58:10,830 INFO [2113c16e5528:33369 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T00:58:10,830 INFO [2113c16e5528:33369 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T00:58:10,830 INFO [2113c16e5528:33369 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T00:58:10,830 DEBUG [2113c16e5528:33369 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T00:58:10,839 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T00:58:10,847 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,32927,1733360288712, state=OPENING 2024-12-05T00:58:10,853 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create 
it 2024-12-05T00:58:10,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:10,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:10,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:10,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:10,860 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:58:10,860 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:58:10,861 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:58:10,861 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:58:10,862 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T00:58:10,865 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2113c16e5528,32927,1733360288712}] 2024-12-05T00:58:11,043 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T00:58:11,045 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51503, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T00:58:11,059 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T00:58:11,059 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T00:58:11,060 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-05T00:58:11,063 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C32927%2C1733360288712.meta, suffix=.meta, logDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,32927,1733360288712, 
archiveDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/oldWALs, maxLogs=32 2024-12-05T00:58:11,079 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,32927,1733360288712/2113c16e5528%2C32927%2C1733360288712.meta.1733360291065.meta, exclude list is [], retry=0 2024-12-05T00:58:11,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41667,DS-8bafa01e-8f9d-46ad-945f-175cedad9809,DISK] 2024-12-05T00:58:11,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46149,DS-8b9d97af-dfa8-4625-bfb7-06da988419bc,DISK] 2024-12-05T00:58:11,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37797,DS-ffcd1894-adb0-4c96-8de9-f5d2258e4ec3,DISK] 2024-12-05T00:58:11,088 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,32927,1733360288712/2113c16e5528%2C32927%2C1733360288712.meta.1733360291065.meta 2024-12-05T00:58:11,088 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34391:34391),(127.0.0.1/127.0.0.1:33543:33543),(127.0.0.1/127.0.0.1:37943:37943)] 2024-12-05T00:58:11,088 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:58:11,090 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-05T00:58:11,091 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T00:58:11,092 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T00:58:11,094 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T00:58:11,095 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-05T00:58:11,103 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T00:58:11,104 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:58:11,104 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T00:58:11,104 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T00:58:11,108 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T00:58:11,110 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T00:58:11,110 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:11,111 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:58:11,112 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T00:58:11,113 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T00:58:11,114 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:11,114 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:58:11,115 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T00:58:11,116 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T00:58:11,116 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:11,117 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T00:58:11,117 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T00:58:11,118 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T00:58:11,118 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:11,119 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-05T00:58:11,119 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T00:58:11,121 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740 2024-12-05T00:58:11,124 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740 2024-12-05T00:58:11,127 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T00:58:11,127 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T00:58:11,128 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T00:58:11,131 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T00:58:11,133 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70750397, jitterRate=0.05426307022571564}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T00:58:11,134 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T00:58:11,138 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733360291105Writing region info on filesystem at 1733360291105Initializing all the Stores at 1733360291107 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733360291107Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733360291108 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360291108Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733360291108Cleaning up temporary data from old regions at 1733360291127 (+19 ms)Running coprocessor post-open hooks at 1733360291134 (+7 ms)Region opened successfully at 1733360291138 (+4 ms) 2024-12-05T00:58:11,147 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733360291033 2024-12-05T00:58:11,161 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T00:58:11,161 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T00:58:11,163 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T00:58:11,166 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,32927,1733360288712, state=OPEN 2024-12-05T00:58:11,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:58:11,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:58:11,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:58:11,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T00:58:11,169 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:58:11,169 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:58:11,169 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:58:11,169 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T00:58:11,169 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=2113c16e5528,32927,1733360288712 2024-12-05T00:58:11,177 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T00:58:11,177 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2113c16e5528,32927,1733360288712 in 306 msec 2024-12-05T00:58:11,185 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T00:58:11,185 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 803 msec 2024-12-05T00:58:11,189 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T00:58:11,189 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T00:58:11,214 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:58:11,215 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T00:58:11,238 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:58:11,241 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55999, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:58:11,267 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2490 sec 2024-12-05T00:58:11,267 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733360291267, completionTime=-1 2024-12-05T00:58:11,270 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-05T00:58:11,271 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-05T00:58:11,313 INFO [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-05T00:58:11,313 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733360351313 2024-12-05T00:58:11,313 INFO [master/2113c16e5528:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733360411313 2024-12-05T00:58:11,314 INFO [master/2113c16e5528:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 42 msec 2024-12-05T00:58:11,318 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-05T00:58:11,326 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,33369,1733360287836-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:11,326 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,33369,1733360287836-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:11,327 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,33369,1733360287836-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:11,328 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2113c16e5528:33369, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:11,329 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:11,330 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T00:58:11,337 DEBUG [master/2113c16e5528:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T00:58:11,362 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.354sec 2024-12-05T00:58:11,364 INFO [master/2113c16e5528:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T00:58:11,366 INFO [master/2113c16e5528:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T00:58:11,367 INFO [master/2113c16e5528:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T00:58:11,367 INFO [master/2113c16e5528:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-05T00:58:11,367 INFO [master/2113c16e5528:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T00:58:11,368 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,33369,1733360287836-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T00:58:11,369 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,33369,1733360287836-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T00:58:11,406 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T00:58:11,406 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is 2113c16e5528,33369,1733360287836 2024-12-05T00:58:11,411 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@32d62470 2024-12-05T00:58:11,413 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T00:58:11,415 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57353, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T00:58:11,422 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33369 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T00:58:11,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-12-05T00:58:11,437 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T00:58:11,438 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:11,440 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33369 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-12-05T00:58:11,441 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T00:58:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:58:11,471 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e2c4e04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:11,477 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-05T00:58:11,477 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-05T00:58:11,482 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T00:58:11,486 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T00:58:11,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741837_1013 (size=349) 2024-12-05T00:58:11,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741837_1013 (size=349) 2024-12-05T00:58:11,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741837_1013 (size=349) 2024-12-05T00:58:11,501 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T00:58:11,502 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 29254d26df66b48412cf870ed83cb900, NAME => 'hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T00:58:11,504 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T00:58:11,505 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T00:58:11,505 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@822949a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:11,505 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T00:58:11,509 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T00:58:11,518 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:11,520 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51784, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T00:58:11,525 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63be841e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:11,526 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:58:11,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741838_1014 (size=36) 2024-12-05T00:58:11,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741838_1014 (size=36) 2024-12-05T00:58:11,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741838_1014 (size=36) 2024-12-05T00:58:11,538 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:58:11,538 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 29254d26df66b48412cf870ed83cb900, disabling compactions & flushes 2024-12-05T00:58:11,538 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 2024-12-05T00:58:11,538 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 2024-12-05T00:58:11,538 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. after waiting 0 ms 2024-12-05T00:58:11,539 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 2024-12-05T00:58:11,539 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 
2024-12-05T00:58:11,539 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 29254d26df66b48412cf870ed83cb900: Waiting for close lock at 1733360291538Disabling compacts and flushes for region at 1733360291538Disabling writes for close at 1733360291538Writing region close event to WAL at 1733360291539 (+1 ms)Closed at 1733360291539 2024-12-05T00:58:11,544 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T00:58:11,545 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T00:58:11,546 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:58:11,550 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1733360291545"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360291545"}]},"ts":"1733360291545"} 2024-12-05T00:58:11,558 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-05T00:58:11,561 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T00:58:11,561 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44536, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:58:11,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2113c16e5528,33369,1733360287836 2024-12-05T00:58:11,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 
2024-12-05T00:58:11,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/test.cache.data in system properties and HBase conf 2024-12-05T00:58:11,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.tmp.dir in system properties and HBase conf 2024-12-05T00:58:11,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir in system properties and HBase conf 2024-12-05T00:58:11,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-05T00:58:11,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-05T00:58:11,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-05T00:58:11,564 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360291561"}]},"ts":"1733360291561"} 2024-12-05T00:58:11,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:58:11,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-05T00:58:11,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-05T00:58:11,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:58:11,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 
2024-12-05T00:58:11,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-05T00:58:11,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T00:58:11,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:58:11,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-05T00:58:11,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/nfs.dump.dir in system properties and HBase conf 2024-12-05T00:58:11,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/java.io.tmpdir in system properties and HBase conf 2024-12-05T00:58:11,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T00:58:11,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-05T00:58:11,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-05T00:58:11,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:58:11,579 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-05T00:58:11,580 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {2113c16e5528=0} racks are {/default-rack=0} 2024-12-05T00:58:11,582 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T00:58:11,582 DEBUG [PEWorker-3 {}] 
balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T00:58:11,582 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T00:58:11,582 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T00:58:11,582 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T00:58:11,582 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T00:58:11,582 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T00:58:11,582 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T00:58:11,582 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T00:58:11,582 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T00:58:11,584 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=29254d26df66b48412cf870ed83cb900, ASSIGN}] 2024-12-05T00:58:11,587 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=29254d26df66b48412cf870ed83cb900, ASSIGN 2024-12-05T00:58:11,590 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=29254d26df66b48412cf870ed83cb900, ASSIGN; state=OFFLINE, location=2113c16e5528,32927,1733360288712; forceNewPlan=false, retain=false 2024-12-05T00:58:11,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741839_1015 (size=592039) 2024-12-05T00:58:11,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741839_1015 (size=592039) 2024-12-05T00:58:11,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741839_1015 (size=592039) 2024-12-05T00:58:11,744 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-05T00:58:11,745 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=29254d26df66b48412cf870ed83cb900, regionState=OPENING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T00:58:11,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=29254d26df66b48412cf870ed83cb900, ASSIGN because future has completed 2024-12-05T00:58:11,776 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 29254d26df66b48412cf870ed83cb900, server=2113c16e5528,32927,1733360288712}] 2024-12-05T00:58:11,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:58:11,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741840_1016 (size=1663647) 2024-12-05T00:58:11,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741840_1016 (size=1663647) 2024-12-05T00:58:11,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741840_1016 (size=1663647) 2024-12-05T00:58:11,961 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 2024-12-05T00:58:11,987 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 29254d26df66b48412cf870ed83cb900, NAME => 'hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900.', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:58:11,988 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. service=AccessControlService 2024-12-05T00:58:11,988 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T00:58:11,988 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 29254d26df66b48412cf870ed83cb900 2024-12-05T00:58:11,989 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:58:11,989 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 29254d26df66b48412cf870ed83cb900 2024-12-05T00:58:11,989 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 29254d26df66b48412cf870ed83cb900 2024-12-05T00:58:11,992 INFO [StoreOpener-29254d26df66b48412cf870ed83cb900-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 29254d26df66b48412cf870ed83cb900 2024-12-05T00:58:12,022 INFO [StoreOpener-29254d26df66b48412cf870ed83cb900-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 29254d26df66b48412cf870ed83cb900 columnFamilyName l 2024-12-05T00:58:12,023 DEBUG [StoreOpener-29254d26df66b48412cf870ed83cb900-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:12,024 INFO [StoreOpener-29254d26df66b48412cf870ed83cb900-1 {}] regionserver.HStore(327): Store=29254d26df66b48412cf870ed83cb900/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:58:12,024 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 29254d26df66b48412cf870ed83cb900 2024-12-05T00:58:12,026 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900 2024-12-05T00:58:12,027 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900 2024-12-05T00:58:12,028 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 29254d26df66b48412cf870ed83cb900 2024-12-05T00:58:12,028 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 29254d26df66b48412cf870ed83cb900 2024-12-05T00:58:12,032 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 29254d26df66b48412cf870ed83cb900 2024-12-05T00:58:12,043 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:58:12,044 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 29254d26df66b48412cf870ed83cb900; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61634719, jitterRate=-0.08157111704349518}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T00:58:12,044 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 29254d26df66b48412cf870ed83cb900 2024-12-05T00:58:12,046 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 29254d26df66b48412cf870ed83cb900: Running coprocessor pre-open hook at 1733360291989Writing region info on filesystem at 1733360291989Initializing all the Stores at 1733360291991 (+2 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733360291992 (+1 ms)Cleaning up temporary data from old regions at 1733360292028 (+36 ms)Running coprocessor post-open hooks at 1733360292044 (+16 ms)Region opened successfully at 1733360292046 (+2 ms) 2024-12-05T00:58:12,049 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., pid=6, masterSystemTime=1733360291942 2024-12-05T00:58:12,054 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 2024-12-05T00:58:12,055 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 
2024-12-05T00:58:12,056 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=29254d26df66b48412cf870ed83cb900, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T00:58:12,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 29254d26df66b48412cf870ed83cb900, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T00:58:12,087 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-05T00:58:12,087 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 29254d26df66b48412cf870ed83cb900, server=2113c16e5528,32927,1733360288712 in 305 msec 2024-12-05T00:58:12,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:58:12,093 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-05T00:58:12,093 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=29254d26df66b48412cf870ed83cb900, ASSIGN in 503 msec 2024-12-05T00:58:12,094 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T00:58:12,095 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360292094"}]},"ts":"1733360292094"} 2024-12-05T00:58:12,099 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-05T00:58:12,101 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T00:58:12,106 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 676 msec 2024-12-05T00:58:12,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741829_1005 (size=34) 2024-12-05T00:58:12,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T00:58:12,604 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-12-05T00:58:12,646 DEBUG [master/2113c16e5528:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T00:58:12,648 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T00:58:12,648 INFO [master/2113c16e5528:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2113c16e5528,33369,1733360287836-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
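Editorial sketch, not part of the captured log: the entries above show CreateTableProcedure pid=4 completing for hbase:acl and the AccessController system coprocessor registering its AccessControlService on that region. A minimal client-side sanity check of that state could look roughly like the Java below; only the table name hbase:acl and the access-control service come from the log, while the class name, connection setup, and printed output are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;

    public class AclTableCheckSketch {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // The log above shows CreateTableProcedure finishing for hbase:acl.
          boolean aclTableExists = admin.tableExists(TableName.valueOf("hbase:acl"));
          // Asks the cluster whether access control is active (hedged: exact backing check is an HBase internal).
          boolean aclRunning = AccessControlClient.isAccessControllerRunning(conn);
          System.out.println("hbase:acl exists=" + aclTableExists
              + ", access control running=" + aclRunning);
        }
      }
    }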
2024-12-05T00:58:13,608 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:58:13,733 WARN [Thread-381 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:58:13,976 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:58:13,976 WARN [Thread-381 {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-05T00:58:13,977 INFO [Thread-381 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:58:13,992 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:58:13,992 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:58:13,992 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:58:13,993 INFO [Thread-381 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:58:13,993 INFO [Thread-381 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:58:13,993 INFO [Thread-381 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:58:13,995 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:58:13,996 INFO [Thread-381 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c55a185{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:58:13,996 INFO [Thread-381 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@191ba172{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-05T00:58:14,008 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c85584d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:58:14,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@269a259d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-05T00:58:14,180 INFO [Thread-381 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices as a root resource class 2024-12-05T00:58:14,181 INFO [Thread-381 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver as a provider class 2024-12-05T00:58:14,181 INFO [Thread-381 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-05T00:58:14,183 INFO [Thread-381 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-05T00:58:14,254 INFO [Thread-381 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-05T00:58:14,492 INFO [Thread-381 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-05T00:58:14,783 INFO [Thread-381 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices to GuiceManagedComponentProvider with the scope "PerRequest" 2024-12-05T00:58:14,804 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@18514ef4{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/java.io.tmpdir/jetty-localhost-40565-hadoop-yarn-common-3_4_1_jar-_-any-6015022720650673987/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-05T00:58:14,804 INFO [Thread-381 {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@7ca5085a{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/java.io.tmpdir/jetty-localhost-40967-hadoop-yarn-common-3_4_1_jar-_-any-7497860915865348584/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-05T00:58:14,805 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3a9525b1{HTTP/1.1, (http/1.1)}{localhost:40565} 2024-12-05T00:58:14,805 INFO [Time-limited test {}] server.Server(415): Started @14358ms 2024-12-05T00:58:14,805 INFO [Thread-381 {}] server.AbstractConnector(333): Started ServerConnector@613db7da{HTTP/1.1, (http/1.1)}{localhost:40967} 2024-12-05T00:58:14,805 INFO [Thread-381 {}] server.Server(415): Started @14359ms 2024-12-05T00:58:15,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741841_1017 (size=5) 2024-12-05T00:58:15,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741841_1017 (size=5) 2024-12-05T00:58:15,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741841_1017 (size=5) 2024-12-05T00:58:15,900 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-05T00:58:15,907 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:58:15,972 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-05T00:58:15,973 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:58:15,997 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:58:15,997 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:58:15,997 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T00:58:16,005 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:58:16,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a3e1db4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:58:16,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6417a2a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-05T00:58:16,132 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-05T00:58:16,132 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-05T00:58:16,132 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-05T00:58:16,132 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-05T00:58:16,144 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-05T00:58:16,217 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-05T00:58:16,364 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-05T00:58:16,376 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5419c6a1{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/java.io.tmpdir/jetty-localhost-44633-hadoop-yarn-common-3_4_1_jar-_-any-1349921303975632387/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-05T00:58:16,378 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6af26d81{HTTP/1.1, (http/1.1)}{localhost:44633} 2024-12-05T00:58:16,378 INFO [Time-limited test {}] server.Server(415): Started @15931ms 2024-12-05T00:58:16,638 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T00:58:16,852 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-05T00:58:16,855 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:58:16,856 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-05T00:58:16,860 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-05T00:58:16,887 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-05T00:58:16,888 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T00:58:16,902 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T00:58:16,902 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T00:58:16,902 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T00:58:16,905 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T00:58:16,906 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1042ee3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir/,AVAILABLE} 2024-12-05T00:58:16,907 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@12959846{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-05T00:58:16,979 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-05T00:58:16,979 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-05T00:58:16,979 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-05T00:58:16,980 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-05T00:58:16,991 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-05T00:58:17,005 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-05T00:58:17,121 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding 
org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-05T00:58:17,125 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@375a1889{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/java.io.tmpdir/jetty-localhost-40679-hadoop-yarn-common-3_4_1_jar-_-any-9890716643787942672/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-05T00:58:17,126 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5892eb52{HTTP/1.1, (http/1.1)}{localhost:40679} 2024-12-05T00:58:17,126 INFO [Time-limited test {}] server.Server(415): Started @16679ms 2024-12-05T00:58:17,160 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-12-05T00:58:17,161 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T00:58:17,195 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=715, OpenFileDescriptor=771, MaxFileDescriptor=1048576, SystemLoadAverage=359, ProcessCount=11, AvailableMemoryMB=9811 2024-12-05T00:58:17,197 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=715 is superior to 500 2024-12-05T00:58:17,201 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T00:58:17,206 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 2113c16e5528,33369,1733360287836 2024-12-05T00:58:17,206 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@67da4f42 2024-12-05T00:58:17,207 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T00:58:17,211 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51790, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T00:58:17,213 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:58:17,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:17,217 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 
2024-12-05T00:58:17,219 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-12-05T00:58:17,219 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:17,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T00:58:17,221 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T00:58:17,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741842_1018 (size=422) 2024-12-05T00:58:17,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741842_1018 (size=422) 2024-12-05T00:58:17,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741842_1018 (size=422) 2024-12-05T00:58:17,251 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ed60f0bae8dcbefaa675e8882731c906, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T00:58:17,252 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 5033a9c1e8deb7792e7f44f66f21f1f0, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T00:58:17,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741843_1019 (size=83) 2024-12-05T00:58:17,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46149 is added to blk_1073741843_1019 (size=83) 2024-12-05T00:58:17,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741843_1019 (size=83) 2024-12-05T00:58:17,315 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:58:17,315 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing 5033a9c1e8deb7792e7f44f66f21f1f0, disabling compactions & flushes 2024-12-05T00:58:17,315 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. 2024-12-05T00:58:17,315 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. 2024-12-05T00:58:17,315 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. after waiting 0 ms 2024-12-05T00:58:17,315 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. 2024-12-05T00:58:17,316 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. 
2024-12-05T00:58:17,316 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 5033a9c1e8deb7792e7f44f66f21f1f0: Waiting for close lock at 1733360297315Disabling compacts and flushes for region at 1733360297315Disabling writes for close at 1733360297315Writing region close event to WAL at 1733360297315Closed at 1733360297315 2024-12-05T00:58:17,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741844_1020 (size=83) 2024-12-05T00:58:17,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741844_1020 (size=83) 2024-12-05T00:58:17,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741844_1020 (size=83) 2024-12-05T00:58:17,330 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:58:17,331 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing ed60f0bae8dcbefaa675e8882731c906, disabling compactions & flushes 2024-12-05T00:58:17,331 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. 2024-12-05T00:58:17,331 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. 2024-12-05T00:58:17,331 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. after waiting 0 ms 2024-12-05T00:58:17,331 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. 2024-12-05T00:58:17,331 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. 
2024-12-05T00:58:17,331 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for ed60f0bae8dcbefaa675e8882731c906: Waiting for close lock at 1733360297331Disabling compacts and flushes for region at 1733360297331Disabling writes for close at 1733360297331Writing region close event to WAL at 1733360297331Closed at 1733360297331 2024-12-05T00:58:17,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T00:58:17,334 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T00:58:17,335 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733360297334"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360297334"}]},"ts":"1733360297334"} 2024-12-05T00:58:17,335 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733360297334"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360297334"}]},"ts":"1733360297334"} 2024-12-05T00:58:17,388 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T00:58:17,391 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T00:58:17,392 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360297391"}]},"ts":"1733360297391"} 2024-12-05T00:58:17,401 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-05T00:58:17,401 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {2113c16e5528=0} racks are {/default-rack=0} 2024-12-05T00:58:17,404 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T00:58:17,404 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T00:58:17,404 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T00:58:17,404 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T00:58:17,404 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T00:58:17,404 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T00:58:17,404 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T00:58:17,404 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T00:58:17,404 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T00:58:17,404 DEBUG [PEWorker-1 {}] 
balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T00:58:17,405 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ed60f0bae8dcbefaa675e8882731c906, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5033a9c1e8deb7792e7f44f66f21f1f0, ASSIGN}] 2024-12-05T00:58:17,409 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ed60f0bae8dcbefaa675e8882731c906, ASSIGN 2024-12-05T00:58:17,410 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5033a9c1e8deb7792e7f44f66f21f1f0, ASSIGN 2024-12-05T00:58:17,417 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5033a9c1e8deb7792e7f44f66f21f1f0, ASSIGN; state=OFFLINE, location=2113c16e5528,41191,1733360288910; forceNewPlan=false, retain=false 2024-12-05T00:58:17,417 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ed60f0bae8dcbefaa675e8882731c906, ASSIGN; state=OFFLINE, location=2113c16e5528,32927,1733360288712; forceNewPlan=false, retain=false 2024-12-05T00:58:17,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T00:58:17,568 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
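Editorial sketch, not part of the captured log: the entries above record a create-table request for testtb-testExportFileSystemStateWithSplitRegion with REGION_REPLICATION => '1', a single family 'cf' (VERSIONS => '1', BLOOMFILTER => 'ROW'), two regions pre-split at row key '1' (STARTKEY '' to '1' and '1' to ''), and the ASSIGN subprocedures pid=8 and pid=9 that follow. A roughly equivalent client-side creation, with the descriptor values read off the log and everything else (class name, connection setup) assumed, might look like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreatePreSplitTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
          // Descriptor values mirror the create request logged above.
          TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(name)
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .setBloomFilterType(BloomType.ROW)
                  .build());
          // Pre-splitting at "1" yields the two regions ('' -> '1', '1' -> '') seen in the log.
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(table.build(), splitKeys);
        }
      }
    }

Pre-splitting at a single key is what produces one TransitRegionStateProcedure per region (pid=8 and pid=9 above), each assigned to a different region server by the balancer.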
2024-12-05T00:58:17,568 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=5033a9c1e8deb7792e7f44f66f21f1f0, regionState=OPENING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T00:58:17,568 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=ed60f0bae8dcbefaa675e8882731c906, regionState=OPENING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T00:58:17,573 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5033a9c1e8deb7792e7f44f66f21f1f0, ASSIGN because future has completed 2024-12-05T00:58:17,575 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5033a9c1e8deb7792e7f44f66f21f1f0, server=2113c16e5528,41191,1733360288910}] 2024-12-05T00:58:17,581 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ed60f0bae8dcbefaa675e8882731c906, ASSIGN because future has completed 2024-12-05T00:58:17,582 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure ed60f0bae8dcbefaa675e8882731c906, server=2113c16e5528,32927,1733360288712}] 2024-12-05T00:58:17,735 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T00:58:17,745 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. 2024-12-05T00:58:17,746 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => ed60f0bae8dcbefaa675e8882731c906, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T00:58:17,746 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. service=AccessControlService 2024-12-05T00:58:17,746 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T00:58:17,747 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion ed60f0bae8dcbefaa675e8882731c906 2024-12-05T00:58:17,747 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:58:17,747 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for ed60f0bae8dcbefaa675e8882731c906 2024-12-05T00:58:17,747 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for ed60f0bae8dcbefaa675e8882731c906 2024-12-05T00:58:17,751 INFO [StoreOpener-ed60f0bae8dcbefaa675e8882731c906-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ed60f0bae8dcbefaa675e8882731c906 2024-12-05T00:58:17,754 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56865, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T00:58:17,754 INFO [StoreOpener-ed60f0bae8dcbefaa675e8882731c906-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ed60f0bae8dcbefaa675e8882731c906 columnFamilyName cf 2024-12-05T00:58:17,754 DEBUG [StoreOpener-ed60f0bae8dcbefaa675e8882731c906-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:17,755 INFO [StoreOpener-ed60f0bae8dcbefaa675e8882731c906-1 {}] regionserver.HStore(327): Store=ed60f0bae8dcbefaa675e8882731c906/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:58:17,755 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for ed60f0bae8dcbefaa675e8882731c906 2024-12-05T00:58:17,756 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906 
2024-12-05T00:58:17,757 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906 2024-12-05T00:58:17,758 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for ed60f0bae8dcbefaa675e8882731c906 2024-12-05T00:58:17,758 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for ed60f0bae8dcbefaa675e8882731c906 2024-12-05T00:58:17,760 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. 2024-12-05T00:58:17,761 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 5033a9c1e8deb7792e7f44f66f21f1f0, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T00:58:17,761 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. service=AccessControlService 2024-12-05T00:58:17,761 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T00:58:17,762 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T00:58:17,762 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:58:17,762 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T00:58:17,762 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T00:58:17,763 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for ed60f0bae8dcbefaa675e8882731c906 2024-12-05T00:58:17,765 INFO [StoreOpener-5033a9c1e8deb7792e7f44f66f21f1f0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T00:58:17,768 INFO [StoreOpener-5033a9c1e8deb7792e7f44f66f21f1f0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5033a9c1e8deb7792e7f44f66f21f1f0 columnFamilyName cf 2024-12-05T00:58:17,768 DEBUG [StoreOpener-5033a9c1e8deb7792e7f44f66f21f1f0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:17,768 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:58:17,770 INFO [StoreOpener-5033a9c1e8deb7792e7f44f66f21f1f0-1 {}] regionserver.HStore(327): Store=5033a9c1e8deb7792e7f44f66f21f1f0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:58:17,770 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened ed60f0bae8dcbefaa675e8882731c906; next sequenceid=2; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72399953, jitterRate=0.0788433700799942}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T00:58:17,770 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ed60f0bae8dcbefaa675e8882731c906 2024-12-05T00:58:17,772 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for ed60f0bae8dcbefaa675e8882731c906: Running coprocessor pre-open hook at 1733360297747Writing region info on filesystem at 1733360297747Initializing all the Stores at 1733360297749 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360297749Cleaning up temporary data from old regions at 1733360297758 (+9 ms)Running coprocessor post-open hooks at 1733360297770 (+12 ms)Region opened successfully at 1733360297772 (+2 ms) 2024-12-05T00:58:17,773 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T00:58:17,774 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T00:58:17,775 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T00:58:17,776 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T00:58:17,776 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T00:58:17,781 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906., pid=11, masterSystemTime=1733360297737 2024-12-05T00:58:17,784 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. 2024-12-05T00:58:17,784 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. 
2024-12-05T00:58:17,785 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T00:58:17,786 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=ed60f0bae8dcbefaa675e8882731c906, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T00:58:17,789 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:58:17,790 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 5033a9c1e8deb7792e7f44f66f21f1f0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70741146, jitterRate=0.05412521958351135}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T00:58:17,790 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T00:58:17,790 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 5033a9c1e8deb7792e7f44f66f21f1f0: Running coprocessor pre-open hook at 1733360297762Writing region info on filesystem at 1733360297762Initializing all the Stores at 1733360297765 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360297765Cleaning up temporary data from old regions at 1733360297777 (+12 ms)Running coprocessor post-open hooks at 1733360297790 (+13 ms)Region opened successfully at 1733360297790 2024-12-05T00:58:17,790 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure ed60f0bae8dcbefaa675e8882731c906, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T00:58:17,791 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0., pid=10, masterSystemTime=1733360297735 2024-12-05T00:58:17,795 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. 2024-12-05T00:58:17,795 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. 
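
For orientation: the two regions opened above are ed60f0bae8dcbefaa675e8882731c906 (start key empty, end key '1') and 5033a9c1e8deb7792e7f44f66f21f1f0 (start key '1', end key empty). Once the table is online, the same layout can be listed from a client; a minimal sketch, assuming an hbase-site.xml on the classpath that points at this cluster:

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ListTableRegions {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // One RegionInfo per region; the log above shows two: [,1) and [1,).
          List<RegionInfo> regions = admin.getRegions(tn);
          for (RegionInfo ri : regions) {
            System.out.printf("%s  start=%s  end=%s%n",
                ri.getEncodedName(),
                Bytes.toStringBinary(ri.getStartKey()),
                Bytes.toStringBinary(ri.getEndKey()));
          }
        }
      }
    }
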
2024-12-05T00:58:17,796 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=5033a9c1e8deb7792e7f44f66f21f1f0, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T00:58:17,796 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=8 2024-12-05T00:58:17,796 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure ed60f0bae8dcbefaa675e8882731c906, server=2113c16e5528,32927,1733360288712 in 210 msec 2024-12-05T00:58:17,799 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ed60f0bae8dcbefaa675e8882731c906, ASSIGN in 391 msec 2024-12-05T00:58:17,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5033a9c1e8deb7792e7f44f66f21f1f0, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T00:58:17,804 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-05T00:58:17,804 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 5033a9c1e8deb7792e7f44f66f21f1f0, server=2113c16e5528,41191,1733360288910 in 227 msec 2024-12-05T00:58:17,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-05T00:58:17,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5033a9c1e8deb7792e7f44f66f21f1f0, ASSIGN in 399 msec 2024-12-05T00:58:17,809 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T00:58:17,809 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360297809"}]},"ts":"1733360297809"} 2024-12-05T00:58:17,812 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-05T00:58:17,814 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T00:58:17,818 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-05T00:58:17,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T00:58:17,835 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:58:17,835 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T00:58:17,836 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:58:17,837 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43495, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-05T00:58:17,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T00:58:17,843 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-05T00:58:17,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T00:58:17,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-05T00:58:17,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-05T00:58:17,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:17,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:17,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-05T00:58:17,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T00:58:17,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-05T00:58:17,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-05T00:58:17,878 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T00:58:17,878 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T00:58:17,878 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T00:58:17,878 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T00:58:17,882 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 664 msec 2024-12-05T00:58:18,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T00:58:18,305 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-05T00:58:18,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-05T00:58:18,305 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-05T00:58:18,307 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:18,307 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-05T00:58:18,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-05T00:58:18,309 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-05T00:58:18,310 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T00:58:18,310 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-05T00:58:18,310 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-05T00:58:18,310 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-05T00:58:18,310 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T00:58:18,310 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-05T00:58:18,311 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T00:58:18,311 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-05T00:58:18,311 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-05T00:58:18,311 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-05T00:58:18,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T00:58:18,362 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-05T00:58:18,363 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSplitRegion get assigned. Timeout = 60000ms 2024-12-05T00:58:18,364 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T00:58:18,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSplitRegion assigned to meta. Checking AM states. 2024-12-05T00:58:18,370 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T00:58:18,371 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSplitRegion assigned. 
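
The table creation above (CreateTableProcedure pid=7) and the wait until all regions are assigned correspond, on the client side, to a pre-split createTable call followed by an availability check. A minimal sketch, assuming a single column family 'cf' and a single split point at '1' (consistent with the region boundaries in the log; the exact split key passed by the test is an assumption):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreatePreSplitTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        byte[][] splitKeys = { Bytes.toBytes("1") };   // yields regions [,1) and [1,) as seen in the log

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(
              TableDescriptorBuilder.newBuilder(tn)
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                  .build(),
              splitKeys);
          // createTable blocks on the master procedure; a defensive re-check:
          while (!admin.isTableAvailable(tn)) {
            Thread.sleep(100);
          }
        }
      }
    }
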
2024-12-05T00:58:18,374 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T00:58:18,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-05T00:58:18,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360298386 (current time:1733360298386). 2024-12-05T00:58:18,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T00:58:18,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-05T00:58:18,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T00:58:18,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31122853, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:18,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T00:58:18,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T00:58:18,390 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T00:58:18,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T00:58:18,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T00:58:18,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@440582b3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:18,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T00:58:18,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T00:58:18,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
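
The snapshot request logged above ({ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion ... type=FLUSH ttl=0 }) is what the Admin API issues for a plain flush-type snapshot. A minimal client-side sketch, assuming the same snapshot and table names as in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeEmptySnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Defaults to a FLUSH-type snapshot, matching "type=FLUSH ttl=0" above.
          // The call blocks while the master runs the SnapshotProcedure (pid=12 here).
          admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSplitRegion", tn);
        }
      }
    }
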
2024-12-05T00:58:18,392 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51806, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T00:58:18,394 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f24608f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:18,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:58:18,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T00:58:18,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:58:18,396 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44542, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:58:18,398 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 2024-12-05T00:58:18,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T00:58:18,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:18,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:18,404 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
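
The call stack above is benign: it is the master closing a short-lived connection it opened while validating the snapshot, and SnapshotDescriptionUtils.isSecurityAvailable essentially amounts to checking whether the hbase:acl table exists (the AccessController is active in this test, hence the "jenkins: RWXCA" ACL entries). A rough client-side equivalent of that check, as a sketch rather than the master's internal code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CheckAclTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // When the AccessController coprocessor is active, the hbase:acl table exists
          // and table permissions (e.g. "jenkins: RWXCA" in this log) are stored in it.
          boolean aclAvailable = admin.tableExists(TableName.valueOf("hbase:acl"));
          System.out.println("hbase:acl present: " + aclAvailable);
        }
      }
    }
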
2024-12-05T00:58:18,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c9ad05e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:18,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T00:58:18,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T00:58:18,407 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T00:58:18,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T00:58:18,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T00:58:18,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49f963e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:18,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T00:58:18,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T00:58:18,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:18,409 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51822, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T00:58:18,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@222ec366, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:18,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:58:18,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T00:58:18,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:58:18,413 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44546, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-05T00:58:18,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T00:58:18,418 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 2024-12-05T00:58:18,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T00:58:18,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:18,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:18,419 INFO [Registry-endpoints-refresh-end-points 
{}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T00:58:18,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-05T00:58:18,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T00:58:18,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-05T00:58:18,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-05T00:58:18,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-05T00:58:18,432 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T00:58:18,438 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T00:58:18,454 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T00:58:18,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741845_1021 (size=215) 2024-12-05T00:58:18,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741845_1021 (size=215) 2024-12-05T00:58:18,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741845_1021 (size=215) 2024-12-05T00:58:18,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-05T00:58:18,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-05T00:58:18,893 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T00:58:18,895 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed60f0bae8dcbefaa675e8882731c906}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5033a9c1e8deb7792e7f44f66f21f1f0}] 2024-12-05T00:58:18,903 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T00:58:18,904 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed60f0bae8dcbefaa675e8882731c906 2024-12-05T00:58:19,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-05T00:58:19,063 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41191 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-12-05T00:58:19,063 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32927 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-12-05T00:58:19,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. 2024-12-05T00:58:19,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. 2024-12-05T00:58:19,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 5033a9c1e8deb7792e7f44f66f21f1f0: 2024-12-05T00:58:19,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for ed60f0bae8dcbefaa675e8882731c906: 2024-12-05T00:58:19,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-05T00:58:19,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 
2024-12-05T00:58:19,070 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:19,070 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:19,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T00:58:19,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T00:58:19,076 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T00:58:19,076 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T00:58:19,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741846_1022 (size=86) 2024-12-05T00:58:19,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741846_1022 (size=86) 2024-12-05T00:58:19,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741846_1022 (size=86) 2024-12-05T00:58:19,090 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. 
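
"Adding snapshot references for [] hfiles" above reflects that neither region has any store files yet, so emptySnaptb0-... captures a table with no data. A quick way to confirm that from a client at this point in the run, as a sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    public class CountRows {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             ResultScanner scanner = table.getScanner(new Scan())) {
          long rows = 0;
          for (Result ignored : scanner) {
            rows++;
          }
          System.out.println("rows=" + rows);   // 0 at this point in the log; data is loaded later
        }
      }
    }
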
2024-12-05T00:58:19,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-05T00:58:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-12-05T00:58:19,095 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region ed60f0bae8dcbefaa675e8882731c906 2024-12-05T00:58:19,095 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed60f0bae8dcbefaa675e8882731c906 2024-12-05T00:58:19,101 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ed60f0bae8dcbefaa675e8882731c906 in 202 msec 2024-12-05T00:58:19,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741847_1023 (size=86) 2024-12-05T00:58:19,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741847_1023 (size=86) 2024-12-05T00:58:19,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741847_1023 (size=86) 2024-12-05T00:58:19,123 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. 
2024-12-05T00:58:19,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-05T00:58:19,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-05T00:58:19,125 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T00:58:19,125 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T00:58:19,131 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-12-05T00:58:19,132 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5033a9c1e8deb7792e7f44f66f21f1f0 in 232 msec 2024-12-05T00:58:19,132 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T00:58:19,134 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T00:58:19,138 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T00:58:19,139 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:19,142 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:19,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741848_1024 (size=597) 2024-12-05T00:58:19,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741848_1024 (size=597) 2024-12-05T00:58:19,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741848_1024 (size=597) 2024-12-05T00:58:19,173 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T00:58:19,188 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T00:58:19,189 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:19,194 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T00:58:19,194 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-05T00:58:19,197 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 768 msec 2024-12-05T00:58:19,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-05T00:58:19,573 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-05T00:58:19,585 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='0e09c076dfc8d55cf13eba160e5899a2b', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T00:58:19,587 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='187a5970a9dacd14a42b88165295bd476', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T00:58:19,590 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='20c26ca288fa9394571c827c2bbfc3a23', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0., 
hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T00:58:19,592 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='329b66baaf75b8db042b433cf8dcfad86', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T00:58:19,594 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='451305f04dc16a728f6881e0374092b4f', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T00:58:19,595 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:58:19,595 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='56b705380a6725e4487996f9e6cdeb9ad', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T00:58:19,596 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='66ba93ae89958d84bf7518cf46bc1a449', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T00:58:19,598 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48028, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:58:19,599 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T00:58:19,604 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41191 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T00:58:19,609 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T00:58:19,613 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:19,614 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. 
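
The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." messages above come from puts issued with durability set to skip the WAL, a deliberate trade-off for fast test loading. A minimal sketch of such a write; the row key, qualifier, and value are placeholders, not the test's actual data:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPut {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn)) {
          Put put = new Put(Bytes.toBytes("row-0001"));                           // placeholder row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);                                 // triggers the WAL-disabled warning
          table.put(put);
        }
      }
    }
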
2024-12-05T00:58:19,615 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T00:58:19,617 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T00:58:19,633 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T00:58:19,671 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T00:58:19,693 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-05T00:58:19,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360299693 (current time:1733360299693). 2024-12-05T00:58:19,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T00:58:19,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-05T00:58:19,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T00:58:19,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5670f22f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:19,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T00:58:19,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T00:58:19,717 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T00:58:19,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T00:58:19,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T00:58:19,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36575ee6, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:19,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T00:58:19,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T00:58:19,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:19,720 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51848, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T00:58:19,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@674c1e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:19,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:58:19,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T00:58:19,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:58:19,726 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44550, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:58:19,729 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 
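
At this point the empty snapshot has completed (pid=12 finished, and the client saw "Operation: SNAPSHOT ... completed") and the second snapshot, snaptb0-testExportFileSystemStateWithSplitRegion, is going through the same validation path. Once both procedures finish, the snapshots can be verified by name from a client; a sketch assuming the snapshot names taken from this log:

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshots {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          List<SnapshotDescription> snapshots = admin.listSnapshots();
          for (SnapshotDescription sd : snapshots) {
            // Expect emptySnaptb0-testExportFileSystemStateWithSplitRegion here,
            // and snaptb0-testExportFileSystemStateWithSplitRegion once its procedure completes.
            System.out.println(sd.getName());
          }
        }
      }
    }
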
2024-12-05T00:58:19,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T00:58:19,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:19,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:19,730 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T00:58:19,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4734775e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:19,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T00:58:19,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T00:58:19,738 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T00:58:19,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T00:58:19,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T00:58:19,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cf4b880, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:19,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T00:58:19,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T00:58:19,740 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:19,741 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51866, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T00:58:19,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2408a539, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:19,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:58:19,745 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T00:58:19,745 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:58:19,747 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44558, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:58:19,749 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T00:58:19,751 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 
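The "Start fetching meta region location from registry" / "The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,...]" entries are the short-lived connection bootstrapping its view of hbase:meta before it can locate rows such as the hbase:acl entry for the table. A hedged sketch of the equivalent client-side lookup via RegionLocator; the row key is taken from a later log entry, everything else is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateRegionSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))) {
          // Resolves which region server hosts this row; internally this drives the same
          // "Scanning META starting at row=..." lookups that appear earlier in the log.
          HRegionLocation loc = locator.getRegionLocation(
              Bytes.toBytes("0186b0145901092f8e1ac1fbde9d6f79"));
          System.out.println(loc.getRegion().getRegionNameAsString() + " @ " + loc.getServerName());
        }
      }
    }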
2024-12-05T00:58:19,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T00:58:19,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:19,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:19,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-05T00:58:19,752 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T00:58:19,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
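The stack trace above ends in writeAclToSnapshotDescription, and the following "Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA]" entry shows the full table permission set (Read/Write/Exec/Create/Admin) being copied into the snapshot description. A sketch of how such a grant is typically issued through AccessControlClient, assuming the AccessController coprocessor is enabled as it is in this mini cluster; the user and table names come from the log, the rest is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantSketch {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // Grants R/W/X/C/A on the whole table (null family/qualifier = all columns),
          // which is what later shows up as "kv [jenkins: RWXCA]" in the hbase:acl table.
          AccessControlClient.grant(conn,
              TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"),
              "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }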
2024-12-05T00:58:19,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-05T00:58:19,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-05T00:58:19,758 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T00:58:19,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-05T00:58:19,759 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T00:58:19,764 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T00:58:19,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741849_1025 (size=210) 2024-12-05T00:58:19,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741849_1025 (size=210) 2024-12-05T00:58:19,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741849_1025 (size=210) 2024-12-05T00:58:19,782 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T00:58:19,782 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed60f0bae8dcbefaa675e8882731c906}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5033a9c1e8deb7792e7f44f66f21f1f0}] 2024-12-05T00:58:19,784 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T00:58:19,784 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed60f0bae8dcbefaa675e8882731c906 2024-12-05T00:58:19,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-05T00:58:19,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41191 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-12-05T00:58:19,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32927 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-12-05T00:58:19,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. 2024-12-05T00:58:19,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. 2024-12-05T00:58:19,942 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing 5033a9c1e8deb7792e7f44f66f21f1f0 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-05T00:58:19,942 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing ed60f0bae8dcbefaa675e8882731c906 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-05T00:58:20,025 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906/.tmp/cf/7527ff86931145ff931f53f4978804af is 71, key is 0186b0145901092f8e1ac1fbde9d6f79/cf:q/1733360299599/Put/seqid=0 2024-12-05T00:58:20,025 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0/.tmp/cf/b33bfc3a3f1d441db35be3bdcc370588 is 71, key is 1a2e92b1165d93663c7effaa7c056712/cf:q/1733360299604/Put/seqid=0 2024-12-05T00:58:20,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741851_1027 (size=5424) 2024-12-05T00:58:20,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741851_1027 (size=5424) 2024-12-05T00:58:20,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741851_1027 (size=5424) 2024-12-05T00:58:20,047 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906/.tmp/cf/7527ff86931145ff931f53f4978804af 2024-12-05T00:58:20,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741850_1026 (size=8190) 2024-12-05T00:58:20,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741850_1026 (size=8190) 2024-12-05T00:58:20,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741850_1026 (size=8190) 2024-12-05T00:58:20,061 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0/.tmp/cf/b33bfc3a3f1d441db35be3bdcc370588 2024-12-05T00:58:20,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-05T00:58:20,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906/.tmp/cf/7527ff86931145ff931f53f4978804af as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906/cf/7527ff86931145ff931f53f4978804af 2024-12-05T00:58:20,130 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0/.tmp/cf/b33bfc3a3f1d441db35be3bdcc370588 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0/cf/b33bfc3a3f1d441db35be3bdcc370588 2024-12-05T00:58:20,152 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906/cf/7527ff86931145ff931f53f4978804af, entries=5, sequenceid=6, filesize=5.3 K 2024-12-05T00:58:20,154 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0/cf/b33bfc3a3f1d441db35be3bdcc370588, entries=45, sequenceid=6, filesize=8.0 K 2024-12-05T00:58:20,164 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for ed60f0bae8dcbefaa675e8882731c906 in 217ms, sequenceid=6, compaction requested=false 2024-12-05T00:58:20,164 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 5033a9c1e8deb7792e7f44f66f21f1f0 in 217ms, sequenceid=6, compaction requested=false 2024-12-05T00:58:20,164 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-05T00:58:20,164 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-05T00:58:20,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for 5033a9c1e8deb7792e7f44f66f21f1f0: 2024-12-05T00:58:20,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for ed60f0bae8dcbefaa675e8882731c906: 2024-12-05T00:58:20,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-05T00:58:20,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-05T00:58:20,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:20,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:20,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T00:58:20,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T00:58:20,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906/cf/7527ff86931145ff931f53f4978804af] hfiles 2024-12-05T00:58:20,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0/cf/b33bfc3a3f1d441db35be3bdcc370588] hfiles 2024-12-05T00:58:20,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906/cf/7527ff86931145ff931f53f4978804af for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:20,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0/cf/b33bfc3a3f1d441db35be3bdcc370588 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:20,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741852_1028 (size=125) 2024-12-05T00:58:20,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741852_1028 (size=125) 2024-12-05T00:58:20,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741852_1028 (size=125) 2024-12-05T00:58:20,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. 
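Because the snapshot was requested with type=FLUSH, each SnapshotRegionProcedure above first flushes the region's memstore (333 B and 2.93 KB in this run) and only then records the committed hfile as a manifest reference. When the data is already durable on disk, a SKIPFLUSH snapshot avoids that per-region flush; the sketch below (the explicit flush call, snapshot name, and SKIPFLUSH choice are illustrative, not what this test does) shows the alternative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SkipFlushSnapshotSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(table); // force memstores to hfiles up front ...
          // ... so the snapshot itself only records file references, no per-region flush step.
          admin.snapshot(new SnapshotDescription(
              "snaptb0-skipflush-example", table, SnapshotType.SKIPFLUSH));
        }
      }
    }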
2024-12-05T00:58:20,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-12-05T00:58:20,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-12-05T00:58:20,236 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region ed60f0bae8dcbefaa675e8882731c906 2024-12-05T00:58:20,236 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed60f0bae8dcbefaa675e8882731c906 2024-12-05T00:58:20,241 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ed60f0bae8dcbefaa675e8882731c906 in 456 msec 2024-12-05T00:58:20,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741853_1029 (size=125) 2024-12-05T00:58:20,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741853_1029 (size=125) 2024-12-05T00:58:20,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741853_1029 (size=125) 2024-12-05T00:58:20,252 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. 
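Both SnapshotRegionProcedure children have now reported back; a few entries further down the parent procedure (pid=15) consolidates and verifies the manifest and moves the snapshot out of .hbase-snapshot/.tmp, at which point it can be exported to another filesystem, which is the theme of this test. A hedged sketch of driving ExportSnapshot programmatically, assuming it can be run through Hadoop's ToolRunner (it implements the Tool interface in recent HBase versions); the destination URI is a placeholder, and the test itself may wire this up differently:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest plus the referenced hfiles to another cluster/filesystem.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithSplitRegion",
            "-copy-to", "hdfs://backup-cluster:8020/hbase"   // placeholder destination
        });
        System.exit(rc);
      }
    }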
2024-12-05T00:58:20,252 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-05T00:58:20,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-12-05T00:58:20,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T00:58:20,253 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T00:58:20,259 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=15 2024-12-05T00:58:20,259 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5033a9c1e8deb7792e7f44f66f21f1f0 in 473 msec 2024-12-05T00:58:20,259 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T00:58:20,261 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T00:58:20,262 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T00:58:20,262 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:20,263 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:20,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741854_1030 (size=675) 2024-12-05T00:58:20,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741854_1030 (size=675) 2024-12-05T00:58:20,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741854_1030 (size=675) 2024-12-05T00:58:20,283 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T00:58:20,292 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T00:58:20,293 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:20,295 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T00:58:20,295 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-05T00:58:20,298 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 542 msec 2024-12-05T00:58:20,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-05T00:58:20,383 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-05T00:58:20,417 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T00:58:20,422 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T00:58:20,429 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44562, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T00:58:20,429 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T00:58:20,431 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32927 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-05T00:58:20,436 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48036, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T00:58:20,437 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41191 
{}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-05T00:58:20,442 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43766, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T00:58:20,443 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44435 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-05T00:58:20,446 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T00:58:20,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:20,454 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T00:58:20,454 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:20,455 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-12-05T00:58:20,457 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T00:58:20,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-05T00:58:20,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-05T00:58:20,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741855_1031 (size=390) 2024-12-05T00:58:20,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741855_1031 (size=390) 2024-12-05T00:58:20,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741855_1031 (size=390) 2024-12-05T00:58:20,577 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 11543fac37630dff35bbfb80eab61340, NAME => 'testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T00:58:20,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741856_1032 (size=75) 2024-12-05T00:58:20,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741856_1032 (size=75) 2024-12-05T00:58:20,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741856_1032 (size=75) 2024-12-05T00:58:20,647 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:58:20,647 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 11543fac37630dff35bbfb80eab61340, disabling compactions & flushes 2024-12-05T00:58:20,647 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340. 2024-12-05T00:58:20,647 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340. 2024-12-05T00:58:20,647 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340. after waiting 0 ms 2024-12-05T00:58:20,647 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340. 2024-12-05T00:58:20,647 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340. 
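The master log above prints the shell-style form of the new table descriptor ({NAME => 'cf', VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => '65536 B (64KB)', ...}). A minimal sketch of building the equivalent descriptor with the Java client API; only the attributes that differ from nothing obvious are set explicitly, and the connection setup is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)                 // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
                .build())
            .build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until the CreateTableProcedure (pid=18 in the log) finishes;
          // the repeated "Checking to see if procedure is done pid=18" entries are this wait.
          admin.createTable(desc);
        }
      }
    }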
2024-12-05T00:58:20,648 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 11543fac37630dff35bbfb80eab61340: Waiting for close lock at 1733360300647Disabling compacts and flushes for region at 1733360300647Disabling writes for close at 1733360300647Writing region close event to WAL at 1733360300647Closed at 1733360300647 2024-12-05T00:58:20,650 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T00:58:20,651 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733360300651"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360300651"}]},"ts":"1733360300651"} 2024-12-05T00:58:20,655 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-05T00:58:20,657 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T00:58:20,658 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360300657"}]},"ts":"1733360300657"} 2024-12-05T00:58:20,662 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-05T00:58:20,662 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {2113c16e5528=0} racks are {/default-rack=0} 2024-12-05T00:58:20,664 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T00:58:20,664 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T00:58:20,664 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T00:58:20,664 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T00:58:20,664 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T00:58:20,664 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T00:58:20,664 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T00:58:20,664 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T00:58:20,664 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T00:58:20,664 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T00:58:20,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=11543fac37630dff35bbfb80eab61340, ASSIGN}] 2024-12-05T00:58:20,668 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=11543fac37630dff35bbfb80eab61340, ASSIGN 2024-12-05T00:58:20,670 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=11543fac37630dff35bbfb80eab61340, ASSIGN; state=OFFLINE, location=2113c16e5528,32927,1733360288712; forceNewPlan=false, retain=false 2024-12-05T00:58:20,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-05T00:58:20,821 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-05T00:58:20,821 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=11543fac37630dff35bbfb80eab61340, regionState=OPENING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T00:58:20,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=11543fac37630dff35bbfb80eab61340, ASSIGN because future has completed 2024-12-05T00:58:20,827 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 11543fac37630dff35bbfb80eab61340, server=2113c16e5528,32927,1733360288712}] 2024-12-05T00:58:20,992 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340. 2024-12-05T00:58:20,992 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => 11543fac37630dff35bbfb80eab61340, NAME => 'testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340.', STARTKEY => '', ENDKEY => ''} 2024-12-05T00:58:20,992 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340. service=AccessControlService 2024-12-05T00:58:20,993 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
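The assignment entries above show the balancer choosing a server and an OpenRegionProcedure opening the table's single region on 2113c16e5528,32927. A small client-side sketch of checking the result once the table is ENABLED; the method choices here are illustrative:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class RegionAssignmentCheckSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // True once every region of the table is open on some region server.
          System.out.println("available: " + admin.isTableAvailable(table));
          // A freshly created, unsplit table has a single region covering ('', '').
          List<RegionInfo> regions = admin.getRegions(table);
          for (RegionInfo ri : regions) {
            System.out.println(ri.getEncodedName());
          }
        }
      }
    }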
2024-12-05T00:58:20,993 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 11543fac37630dff35bbfb80eab61340 2024-12-05T00:58:20,993 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:58:20,993 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for 11543fac37630dff35bbfb80eab61340 2024-12-05T00:58:20,993 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for 11543fac37630dff35bbfb80eab61340 2024-12-05T00:58:20,999 INFO [StoreOpener-11543fac37630dff35bbfb80eab61340-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 11543fac37630dff35bbfb80eab61340 2024-12-05T00:58:21,002 INFO [StoreOpener-11543fac37630dff35bbfb80eab61340-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 11543fac37630dff35bbfb80eab61340 columnFamilyName cf 2024-12-05T00:58:21,003 DEBUG [StoreOpener-11543fac37630dff35bbfb80eab61340-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:21,004 INFO [StoreOpener-11543fac37630dff35bbfb80eab61340-1 {}] regionserver.HStore(327): Store=11543fac37630dff35bbfb80eab61340/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:58:21,004 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for 11543fac37630dff35bbfb80eab61340 2024-12-05T00:58:21,005 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340 2024-12-05T00:58:21,006 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340 2024-12-05T00:58:21,007 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for 11543fac37630dff35bbfb80eab61340 2024-12-05T00:58:21,007 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for 11543fac37630dff35bbfb80eab61340 2024-12-05T00:58:21,011 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for 11543fac37630dff35bbfb80eab61340 2024-12-05T00:58:21,027 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T00:58:21,028 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened 11543fac37630dff35bbfb80eab61340; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74269859, jitterRate=0.1067071408033371}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T00:58:21,029 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 11543fac37630dff35bbfb80eab61340 2024-12-05T00:58:21,031 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for 11543fac37630dff35bbfb80eab61340: Running coprocessor pre-open hook at 1733360300993Writing region info on filesystem at 1733360300993Initializing all the Stores at 1733360300998 (+5 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360300998Cleaning up temporary data from old regions at 1733360301007 (+9 ms)Running coprocessor post-open hooks at 1733360301029 (+22 ms)Region opened successfully at 1733360301030 (+1 ms) 2024-12-05T00:58:21,032 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340., pid=20, masterSystemTime=1733360300985 2024-12-05T00:58:21,037 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340. 2024-12-05T00:58:21,037 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340. 
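The "Opened 11543fac..." entry also prints the split policy the region came up with: a SteppingSplitPolicy layered over IncreasingToUpperBoundRegionSplitPolicy/ConstantSizeRegionSplitPolicy, with a jittered desiredMaxFileSize of 74269859 bytes. That threshold is what the later split-region part of this test plays against. A hedged sketch of pinning a split policy and max file size on a table descriptor; the 10 MB value and the policy choice are purely illustrative, not this test's configuration:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SplitPolicySketch {
      public static void main(String[] args) {
        // With ConstantSizeRegionSplitPolicy, a region splits once a store exceeds MAX_FILESIZE.
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .setRegionSplitPolicyClassName(
                "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy")
            .setMaxFileSize(10L * 1024 * 1024) // illustrative 10 MB threshold
            .build();
        System.out.println(desc);
      }
    }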
2024-12-05T00:58:21,039 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=11543fac37630dff35bbfb80eab61340, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T00:58:21,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 11543fac37630dff35bbfb80eab61340, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T00:58:21,056 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-12-05T00:58:21,056 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure 11543fac37630dff35bbfb80eab61340, server=2113c16e5528,32927,1733360288712 in 220 msec 2024-12-05T00:58:21,066 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-12-05T00:58:21,067 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=11543fac37630dff35bbfb80eab61340, ASSIGN in 391 msec 2024-12-05T00:58:21,068 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T00:58:21,070 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360301069"}]},"ts":"1733360301069"} 2024-12-05T00:58:21,074 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-05T00:58:21,076 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T00:58:21,076 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-05T00:58:21,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-05T00:58:21,082 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-05T00:58:21,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T00:58:21,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T00:58:21,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-12-05T00:58:21,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T00:58:21,091 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T00:58:21,091 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T00:58:21,091 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T00:58:21,091 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T00:58:21,091 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T00:58:21,091 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T00:58:21,092 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T00:58:21,092 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T00:58:21,095 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 645 msec 2024-12-05T00:58:21,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-05T00:58:21,593 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-05T00:58:21,593 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so 
can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:58:21,598 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T00:58:23,251 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T00:58:23,418 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-12-05T00:58:24,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741857_1033 (size=134217728) 2024-12-05T00:58:24,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741857_1033 (size=134217728) 2024-12-05T00:58:24,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741857_1033 (size=134217728) 2024-12-05T00:58:26,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741858_1034 (size=134217728) 2024-12-05T00:58:26,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741858_1034 (size=134217728) 2024-12-05T00:58:26,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741858_1034 (size=134217728) 2024-12-05T00:58:26,729 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1733360301626/Put/seqid=0 2024-12-05T00:58:26,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741859_1035 (size=51979256) 2024-12-05T00:58:26,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741859_1035 (size=51979256) 2024-12-05T00:58:26,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741859_1035 (size=51979256) 2024-12-05T00:58:26,745 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48bb7089, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:26,745 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T00:58:26,745 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T00:58:26,748 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T00:58:26,749 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(424): process preamble call response with 
response type GetConnectionRegistryResponse 2024-12-05T00:58:26,749 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T00:58:26,749 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65874f63, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:26,749 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T00:58:26,750 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T00:58:26,760 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:26,762 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45946, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T00:58:26,763 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5296fbb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:26,764 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:58:26,767 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T00:58:26,767 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:58:26,775 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39190, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:58:26,789 WARN [Time-limited test {}] tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile hdfs://localhost:37149/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 
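The entries above show the client bootstrapping through the connection registry (cluster id e24f6832-226f-46b1-8a64-09e1d59fa392, then the hbase:meta location on 2113c16e5528,32927) before touching the table. A small sketch of the equivalent synchronous lookup with RegionLocator, assuming the same table name; the reload flag and printed fields are illustrative choices:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRegion {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(table)) {
      // Same kind of lookup the async locator performs above for row=''.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true); // reload=true skips the cache
      System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
    }
  }
}
```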
2024-12-05T00:58:26,789 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T00:58:26,791 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.AsyncConnectionImpl(321): The fetched master address is 2113c16e5528,33369,1733360287836 2024-12-05T00:58:26,791 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6325b645 2024-12-05T00:58:26,791 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T00:58:26,793 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45948, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T00:58:26,800 WARN [IPC Server handler 3 on default port 37149 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-05T00:58:26,807 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T00:58:26,816 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T00:58:26,833 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:37149/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-12-05T00:58:26,856 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32927 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-05T00:58:26,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32927 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.2:43495 deadline: 1733360366856, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-12-05T00:58:26,863 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T00:58:26,865 WARN [IPC Server handler 3 on default port 37149 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-05T00:58:26,887 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:37149/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/output/cf/test_file for inclusion in 11543fac37630dff35bbfb80eab61340/cf 2024-12-05T00:58:26,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-12-05T00:58:26,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-05T00:58:26,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:37149/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-05T00:58:26,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HRegion(2603): Flush status journal for 11543fac37630dff35bbfb80eab61340: 2024-12-05T00:58:26,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:37149/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/output/cf/test_file to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/staging/jenkins__testExportFileSystemStateWithSplitRegion__7i8qfvh7so13cqfqlaif61rgu9pu1nfrsbnj6hsg636gerea9rfj1cek0glpcu97/cf/test_file 2024-12-05T00:58:26,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/staging/jenkins__testExportFileSystemStateWithSplitRegion__7i8qfvh7so13cqfqlaif61rgu9pu1nfrsbnj6hsg636gerea9rfj1cek0glpcu97/cf/test_file as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_ 2024-12-05T00:58:26,905 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/staging/jenkins__testExportFileSystemStateWithSplitRegion__7i8qfvh7so13cqfqlaif61rgu9pu1nfrsbnj6hsg636gerea9rfj1cek0glpcu97/cf/test_file into 11543fac37630dff35bbfb80eab61340/cf as 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_ - updating store file list. 2024-12-05T00:58:26,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 36715183efcd44ada3c7a1dfef37fee0_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-05T00:58:26,915 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_ into 11543fac37630dff35bbfb80eab61340/cf 2024-12-05T00:58:26,915 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/staging/jenkins__testExportFileSystemStateWithSplitRegion__7i8qfvh7so13cqfqlaif61rgu9pu1nfrsbnj6hsg636gerea9rfj1cek0glpcu97/cf/test_file into 11543fac37630dff35bbfb80eab61340/cf (new location: hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_) 2024-12-05T00:58:26,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/staging/jenkins__testExportFileSystemStateWithSplitRegion__7i8qfvh7so13cqfqlaif61rgu9pu1nfrsbnj6hsg636gerea9rfj1cek0glpcu97/cf/test_file 2024-12-05T00:58:26,925 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
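The sequence above (staging move, commit as 36715183efcd44ada3c7a1dfef37fee0_SeqId_4_, store-file list update, "Bulk Load done") is the server side of the secure bulk load that BulkLoadHFilesTool drives from the test. A minimal client-side sketch of the same kind of load, assuming the HFiles sit under an output directory with one sub-directory per column family (output/cf/... as in the log) and that the directory path is passed as the first argument:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

public class BulkLoadExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
    // Layout expected by the tool: <dir>/<family>/<hfile>, e.g. .../output/cf/test_file.
    Path hfileDir = new Path(args[0]);
    BulkLoadHFiles.create(conf).bulkLoad(table, hfileDir);
  }
}
```

The "can be problematic as it may lead to oversplitting" warnings appear to fire because the 320,414,712-byte HFile is far larger than the region's split size shown in the open journal (desiredMaxFileSize=74269859), so the region becomes an immediate split candidate once the file is loaded.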
2024-12-05T00:58:26,925 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T00:58:26,925 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:26,926 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:26,926 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T00:58:26,926 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T00:58:26,927 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340., hostname=2113c16e5528,32927,1733360288712, seqNum=2 , the old value is region=testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340., hostname=2113c16e5528,32927,1733360288712, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=2113c16e5528:32927 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-05T00:58:26,928 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340., hostname=2113c16e5528,32927,1733360288712, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-05T00:58:26,928 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340., hostname=2113c16e5528,32927,1733360288712, seqNum=2 from cache 2024-12-05T00:58:26,928 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] util.NettyFutureUtils(65): IO operation failed org.apache.hbase.thirdparty.io.netty.channel.StacklessClosedChannelException: null at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AbstractUnsafe.write(Object, ChannelPromise)(Unknown Source) ~[hbase-shaded-netty-4.1.9.jar:?] 2024-12-05T00:58:26,933 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T00:58:26,943 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.2 split testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340. 
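The HMaster entry above records the client requesting a split of the bulk-loaded region; the SplitTableRegionProcedure (pid=21) that follows carries it out with daughters 80fe0601ee18b66bb88f139047761ef4 and 3163794146e8ea373b4b6cfb860e728f. A sketch of issuing such a split from a client; the explicit split key "5" is an assumption suggested by the row='5' location lookup just above, not something these entries state directly:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitExportTestTable {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Split at an explicit key; Admin.split(table) without a point would let the
      // region's split policy choose the midpoint of the largest store file instead.
      admin.split(table, Bytes.toBytes("5"));
    }
  }
}
```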
2024-12-05T00:58:26,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=2113c16e5528,32927,1733360288712 2024-12-05T00:58:26,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=11543fac37630dff35bbfb80eab61340, daughterA=80fe0601ee18b66bb88f139047761ef4, daughterB=3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:26,959 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=11543fac37630dff35bbfb80eab61340, daughterA=80fe0601ee18b66bb88f139047761ef4, daughterB=3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:26,959 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=11543fac37630dff35bbfb80eab61340, daughterA=80fe0601ee18b66bb88f139047761ef4, daughterB=3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:26,959 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=11543fac37630dff35bbfb80eab61340, daughterA=80fe0601ee18b66bb88f139047761ef4, daughterB=3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:26,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-05T00:58:26,974 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=11543fac37630dff35bbfb80eab61340, UNASSIGN}] 2024-12-05T00:58:26,976 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=11543fac37630dff35bbfb80eab61340, UNASSIGN 2024-12-05T00:58:26,978 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=11543fac37630dff35bbfb80eab61340, regionState=CLOSING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T00:58:26,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=11543fac37630dff35bbfb80eab61340, UNASSIGN because future has completed 2024-12-05T00:58:26,982 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-05T00:58:26,982 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 11543fac37630dff35bbfb80eab61340, server=2113c16e5528,32927,1733360288712}] 2024-12-05T00:58:27,034 WARN 
[Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=2113c16e5528:32927 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 34 more 2024-12-05T00:58:27,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-05T00:58:27,143 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 11543fac37630dff35bbfb80eab61340 2024-12-05T00:58:27,143 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-05T00:58:27,144 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 11543fac37630dff35bbfb80eab61340, disabling compactions & flushes 2024-12-05T00:58:27,144 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340. 2024-12-05T00:58:27,144 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340. 2024-12-05T00:58:27,144 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340. after waiting 0 ms 2024-12-05T00:58:27,144 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340. 2024-12-05T00:58:27,151 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-12-05T00:58:27,155 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T00:58:27,155 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340. 
2024-12-05T00:58:27,155 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 11543fac37630dff35bbfb80eab61340: Waiting for close lock at 1733360307144Running coprocessor pre-close hooks at 1733360307144Disabling compacts and flushes for region at 1733360307144Disabling writes for close at 1733360307144Writing region close event to WAL at 1733360307145 (+1 ms)Running coprocessor post-close hooks at 1733360307151 (+6 ms)Closed at 1733360307155 (+4 ms) 2024-12-05T00:58:27,159 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 11543fac37630dff35bbfb80eab61340 2024-12-05T00:58:27,160 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=11543fac37630dff35bbfb80eab61340, regionState=CLOSED 2024-12-05T00:58:27,162 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 11543fac37630dff35bbfb80eab61340, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T00:58:27,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-12-05T00:58:27,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure 11543fac37630dff35bbfb80eab61340, server=2113c16e5528,32927,1733360288712 in 181 msec 2024-12-05T00:58:27,169 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-12-05T00:58:27,169 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=11543fac37630dff35bbfb80eab61340, UNASSIGN in 192 msec 2024-12-05T00:58:27,183 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:27,190 INFO [PEWorker-1 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=11543fac37630dff35bbfb80eab61340, threads=1 2024-12-05T00:58:27,193 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_ for region: 11543fac37630dff35bbfb80eab61340 2024-12-05T00:58:27,203 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 36715183efcd44ada3c7a1dfef37fee0_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-05T00:58:27,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741860_1036 (size=21) 2024-12-05T00:58:27,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741860_1036 (size=21) 2024-12-05T00:58:27,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741860_1036 (size=21) 
2024-12-05T00:58:27,237 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 36715183efcd44ada3c7a1dfef37fee0_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-05T00:58:27,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741861_1037 (size=21) 2024-12-05T00:58:27,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741861_1037 (size=21) 2024-12-05T00:58:27,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741861_1037 (size=21) 2024-12-05T00:58:27,255 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_ for region: 11543fac37630dff35bbfb80eab61340 2024-12-05T00:58:27,257 DEBUG [PEWorker-1 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region 11543fac37630dff35bbfb80eab61340 Daughter A: [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/80fe0601ee18b66bb88f139047761ef4/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_.11543fac37630dff35bbfb80eab61340] storefiles, Daughter B: [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/3163794146e8ea373b4b6cfb860e728f/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_.11543fac37630dff35bbfb80eab61340] storefiles. 
2024-12-05T00:58:27,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-05T00:58:27,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741862_1038 (size=76) 2024-12-05T00:58:27,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741862_1038 (size=76) 2024-12-05T00:58:27,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741862_1038 (size=76) 2024-12-05T00:58:27,314 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:27,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741863_1039 (size=76) 2024-12-05T00:58:27,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741863_1039 (size=76) 2024-12-05T00:58:27,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741863_1039 (size=76) 2024-12-05T00:58:27,337 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:27,348 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/80fe0601ee18b66bb88f139047761ef4/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-05T00:58:27,351 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/3163794146e8ea373b4b6cfb860e728f/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-05T00:58:27,355 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733360307355"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1733360307355"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1733360307355"}]},"ts":"1733360307355"} 2024-12-05T00:58:27,355 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733360307355"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360307355"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733360307355"}]},"ts":"1733360307355"} 2024-12-05T00:58:27,356 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733360307355"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360307355"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733360307355"}]},"ts":"1733360307355"} 2024-12-05T00:58:27,370 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=80fe0601ee18b66bb88f139047761ef4, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3163794146e8ea373b4b6cfb860e728f, ASSIGN}] 2024-12-05T00:58:27,372 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3163794146e8ea373b4b6cfb860e728f, ASSIGN 2024-12-05T00:58:27,372 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=80fe0601ee18b66bb88f139047761ef4, ASSIGN 2024-12-05T00:58:27,373 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3163794146e8ea373b4b6cfb860e728f, ASSIGN; state=SPLITTING_NEW, location=2113c16e5528,32927,1733360288712; forceNewPlan=false, retain=false 2024-12-05T00:58:27,374 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=80fe0601ee18b66bb88f139047761ef4, ASSIGN; state=SPLITTING_NEW, location=2113c16e5528,32927,1733360288712; forceNewPlan=false, retain=false 2024-12-05T00:58:27,524 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-05T00:58:27,524 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=3163794146e8ea373b4b6cfb860e728f, regionState=OPENING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T00:58:27,524 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=80fe0601ee18b66bb88f139047761ef4, regionState=OPENING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T00:58:27,528 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=80fe0601ee18b66bb88f139047761ef4, ASSIGN because future has completed 2024-12-05T00:58:27,529 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 80fe0601ee18b66bb88f139047761ef4, server=2113c16e5528,32927,1733360288712}] 2024-12-05T00:58:27,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3163794146e8ea373b4b6cfb860e728f, ASSIGN because future has completed 2024-12-05T00:58:27,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3163794146e8ea373b4b6cfb860e728f, server=2113c16e5528,32927,1733360288712}] 2024-12-05T00:58:27,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-05T00:58:27,688 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4. 2024-12-05T00:58:27,688 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => 80fe0601ee18b66bb88f139047761ef4, NAME => 'testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4.', STARTKEY => '', ENDKEY => '5'} 2024-12-05T00:58:27,688 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4. service=AccessControlService 2024-12-05T00:58:27,689 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T00:58:27,689 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 80fe0601ee18b66bb88f139047761ef4 2024-12-05T00:58:27,689 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:58:27,689 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for 80fe0601ee18b66bb88f139047761ef4 2024-12-05T00:58:27,689 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for 80fe0601ee18b66bb88f139047761ef4 2024-12-05T00:58:27,691 INFO [StoreOpener-80fe0601ee18b66bb88f139047761ef4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 80fe0601ee18b66bb88f139047761ef4 2024-12-05T00:58:27,692 INFO [StoreOpener-80fe0601ee18b66bb88f139047761ef4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 80fe0601ee18b66bb88f139047761ef4 columnFamilyName cf 2024-12-05T00:58:27,692 DEBUG [StoreOpener-80fe0601ee18b66bb88f139047761ef4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:27,716 DEBUG [StoreFileOpener-80fe0601ee18b66bb88f139047761ef4-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 36715183efcd44ada3c7a1dfef37fee0_SeqId_4_.11543fac37630dff35bbfb80eab61340: NONE, but ROW specified in column family configuration 2024-12-05T00:58:27,735 DEBUG [StoreOpener-80fe0601ee18b66bb88f139047761ef4-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/80fe0601ee18b66bb88f139047761ef4/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_.11543fac37630dff35bbfb80eab61340->hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_-bottom 2024-12-05T00:58:27,735 INFO [StoreOpener-80fe0601ee18b66bb88f139047761ef4-1 {}] regionserver.HStore(327): Store=80fe0601ee18b66bb88f139047761ef4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:58:27,736 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for 80fe0601ee18b66bb88f139047761ef4 2024-12-05T00:58:27,737 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/80fe0601ee18b66bb88f139047761ef4 2024-12-05T00:58:27,739 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/80fe0601ee18b66bb88f139047761ef4 2024-12-05T00:58:27,740 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for 80fe0601ee18b66bb88f139047761ef4 2024-12-05T00:58:27,740 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for 80fe0601ee18b66bb88f139047761ef4 2024-12-05T00:58:27,744 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for 80fe0601ee18b66bb88f139047761ef4 2024-12-05T00:58:27,747 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened 80fe0601ee18b66bb88f139047761ef4; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69129522, jitterRate=0.030110150575637817}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T00:58:27,748 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 80fe0601ee18b66bb88f139047761ef4 2024-12-05T00:58:27,749 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1006): Region open journal for 80fe0601ee18b66bb88f139047761ef4: Running coprocessor pre-open hook at 1733360307689Writing region info on filesystem at 1733360307689Initializing all the Stores at 1733360307691 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360307691Cleaning up temporary data from old regions at 1733360307740 (+49 ms)Running coprocessor post-open hooks at 1733360307748 (+8 ms)Region opened successfully at 1733360307748 2024-12-05T00:58:27,750 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4., pid=26, masterSystemTime=1733360307682 2024-12-05T00:58:27,751 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.CompactSplit(342): 
Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4.,because compaction is disabled. 2024-12-05T00:58:27,754 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4. 2024-12-05T00:58:27,754 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4. 2024-12-05T00:58:27,755 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f. 2024-12-05T00:58:27,755 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => 3163794146e8ea373b4b6cfb860e728f, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f.', STARTKEY => '5', ENDKEY => ''} 2024-12-05T00:58:27,755 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=80fe0601ee18b66bb88f139047761ef4, regionState=OPEN, openSeqNum=7, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T00:58:27,755 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f. service=AccessControlService 2024-12-05T00:58:27,756 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T00:58:27,756 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:27,756 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T00:58:27,756 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for 3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:27,756 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for 3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:27,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 80fe0601ee18b66bb88f139047761ef4, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T00:58:27,764 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=24 2024-12-05T00:58:27,765 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure 80fe0601ee18b66bb88f139047761ef4, server=2113c16e5528,32927,1733360288712 in 233 msec 2024-12-05T00:58:27,767 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=80fe0601ee18b66bb88f139047761ef4, ASSIGN in 395 msec 2024-12-05T00:58:27,768 INFO [StoreOpener-3163794146e8ea373b4b6cfb860e728f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:27,770 INFO [StoreOpener-3163794146e8ea373b4b6cfb860e728f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3163794146e8ea373b4b6cfb860e728f columnFamilyName cf 2024-12-05T00:58:27,770 DEBUG [StoreOpener-3163794146e8ea373b4b6cfb860e728f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:27,785 DEBUG [StoreFileOpener-3163794146e8ea373b4b6cfb860e728f-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom 
filter type for 36715183efcd44ada3c7a1dfef37fee0_SeqId_4_.11543fac37630dff35bbfb80eab61340: NONE, but ROW specified in column family configuration 2024-12-05T00:58:27,788 DEBUG [StoreOpener-3163794146e8ea373b4b6cfb860e728f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/3163794146e8ea373b4b6cfb860e728f/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_.11543fac37630dff35bbfb80eab61340->hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_-top 2024-12-05T00:58:27,788 INFO [StoreOpener-3163794146e8ea373b4b6cfb860e728f-1 {}] regionserver.HStore(327): Store=3163794146e8ea373b4b6cfb860e728f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T00:58:27,789 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for 3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:27,790 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:27,792 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:27,793 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for 3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:27,793 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for 3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:27,795 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for 3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:27,797 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened 3163794146e8ea373b4b6cfb860e728f; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59986647, jitterRate=-0.10612930357456207}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T00:58:27,797 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:27,797 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1006): Region open journal for 3163794146e8ea373b4b6cfb860e728f: Running coprocessor pre-open hook at 1733360307757Writing region info on filesystem at 1733360307757Initializing all the Stores at 1733360307758 (+1 ms)Instantiating store for 
column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360307759 (+1 ms)Cleaning up temporary data from old regions at 1733360307793 (+34 ms)Running coprocessor post-open hooks at 1733360307797 (+4 ms)Region opened successfully at 1733360307797 2024-12-05T00:58:27,799 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f., pid=27, masterSystemTime=1733360307682 2024-12-05T00:58:27,799 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f.,because compaction is disabled. 2024-12-05T00:58:27,804 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=3163794146e8ea373b4b6cfb860e728f, regionState=OPEN, openSeqNum=7, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T00:58:27,805 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f. 2024-12-05T00:58:27,805 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f. 
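[Editor's sketch] At this point both daughter regions (80fe0601ee18b66bb88f139047761ef4 with STARTKEY '' / ENDKEY '5', and 3163794146e8ea373b4b6cfb860e728f with STARTKEY '5' / ENDKEY '') have been opened on the same region server, each backed by a half reference to the parent's hfile (the "-bottom" and "-top" references loaded above). A hedged sketch of how a client might confirm the split from the outside, assuming the standard Admin.getRegions API (the class name is a placeholder, not test code):

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class VerifySplitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // After the split the table should expose two online regions whose
          // shared boundary is the split key "5", matching the log above.
          List<RegionInfo> regions =
              admin.getRegions(TableName.valueOf("testExportFileSystemStateWithSplitRegion"));
          for (RegionInfo region : regions) {
            System.out.println(region.getEncodedName()
                + " start=" + Bytes.toStringBinary(region.getStartKey())
                + " end=" + Bytes.toStringBinary(region.getEndKey()));
          }
        }
      }
    }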
2024-12-05T00:58:27,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3163794146e8ea373b4b6cfb860e728f, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T00:58:27,814 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=25 2024-12-05T00:58:27,814 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure 3163794146e8ea373b4b6cfb860e728f, server=2113c16e5528,32927,1733360288712 in 281 msec 2024-12-05T00:58:27,818 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=21 2024-12-05T00:58:27,818 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3163794146e8ea373b4b6cfb860e728f, ASSIGN in 444 msec 2024-12-05T00:58:27,821 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=11543fac37630dff35bbfb80eab61340, daughterA=80fe0601ee18b66bb88f139047761ef4, daughterB=3163794146e8ea373b4b6cfb860e728f in 866 msec 2024-12-05T00:58:28,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-05T00:58:28,102 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-05T00:58:28,102 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T00:58:28,107 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-05T00:58:28,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360308107 (current time:1733360308107). 
2024-12-05T00:58:28,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T00:58:28,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-05T00:58:28,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T00:58:28,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1eb45816, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:28,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T00:58:28,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T00:58:28,109 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T00:58:28,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T00:58:28,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T00:58:28,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6eee3448, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:28,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T00:58:28,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T00:58:28,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:28,111 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45978, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T00:58:28,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47bd1801, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:28,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:58:28,112 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T00:58:28,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:58:28,113 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39200, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T00:58:28,115 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 2024-12-05T00:58:28,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T00:58:28,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:28,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:28,115 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T00:58:28,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@142f31a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:28,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T00:58:28,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T00:58:28,117 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T00:58:28,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T00:58:28,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T00:58:28,118 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49dbf7d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:28,118 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T00:58:28,118 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T00:58:28,118 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:28,119 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45988, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T00:58:28,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e4e348f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T00:58:28,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T00:58:28,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T00:58:28,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T00:58:28,122 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39214, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
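[Editor's sketch] The records that follow show the master's snapshot validation looking up hbase:acl and reading the table permission entry "kv [jenkins: RWXCA]" via PermissionStorage before writing the ACL into the snapshot description. The grant itself happened earlier and is not visible in this excerpt; purely as an inferred illustration of how such an entry is typically created when the AccessController coprocessor is enabled (which the earlier "System coprocessor ... AccessController loaded" records confirm), a sketch using AccessControlClient (class name is a placeholder, and this is not the test's actual setup code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantSketch {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Grant Read/Write/Exec/Create/Admin ("RWXCA") on the test table to the
          // "jenkins" user; a grant like this is what PermissionStorage later reads
          // back as entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA].
          AccessControlClient.grant(conn,
              TableName.valueOf("testExportFileSystemStateWithSplitRegion"),
              "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }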
2024-12-05T00:58:28,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T00:58:28,126 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 2024-12-05T00:58:28,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T00:58:28,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:28,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T00:58:28,126 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T00:58:28,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-05T00:58:28,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T00:58:28,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-05T00:58:28,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-05T00:58:28,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-05T00:58:28,130 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T00:58:28,131 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T00:58:28,134 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T00:58:28,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741864_1040 (size=197) 2024-12-05T00:58:28,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741864_1040 (size=197) 2024-12-05T00:58:28,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741864_1040 (size=197) 2024-12-05T00:58:28,147 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T00:58:28,148 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
80fe0601ee18b66bb88f139047761ef4}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3163794146e8ea373b4b6cfb860e728f}] 2024-12-05T00:58:28,149 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:28,149 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 80fe0601ee18b66bb88f139047761ef4 2024-12-05T00:58:28,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-05T00:58:28,302 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32927 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-12-05T00:58:28,302 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4. 2024-12-05T00:58:28,302 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32927 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-12-05T00:58:28,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f. 2024-12-05T00:58:28,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for 80fe0601ee18b66bb88f139047761ef4: 2024-12-05T00:58:28,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-05T00:58:28,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for 3163794146e8ea373b4b6cfb860e728f: 2024-12-05T00:58:28,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-05T00:58:28,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4.' 
region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:28,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T00:58:28,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:28,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T00:58:28,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/80fe0601ee18b66bb88f139047761ef4/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_.11543fac37630dff35bbfb80eab61340->hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_-bottom] hfiles 2024-12-05T00:58:28,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/80fe0601ee18b66bb88f139047761ef4/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_.11543fac37630dff35bbfb80eab61340 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:28,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/3163794146e8ea373b4b6cfb860e728f/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_.11543fac37630dff35bbfb80eab61340->hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_-top] hfiles 2024-12-05T00:58:28,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/3163794146e8ea373b4b6cfb860e728f/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_.11543fac37630dff35bbfb80eab61340 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:28,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:28,304 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-05T00:58:28,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741865_1041 (size=182) 2024-12-05T00:58:28,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741865_1041 (size=182) 2024-12-05T00:58:28,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741865_1041 (size=182) 2024-12-05T00:58:28,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f. 2024-12-05T00:58:28,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-12-05T00:58:28,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-12-05T00:58:28,324 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:28,325 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3163794146e8ea373b4b6cfb860e728f 2024-12-05T00:58:28,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3163794146e8ea373b4b6cfb860e728f in 178 msec 2024-12-05T00:58:28,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741866_1042 (size=182) 2024-12-05T00:58:28,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741866_1042 (size=182) 2024-12-05T00:58:28,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741866_1042 (size=182) 2024-12-05T00:58:28,335 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4. 
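[Editor's sketch] The SnapshotRegionProcedures have now recorded references to the daughters' half-reference hfiles, and the SNAPSHOT_SNAPSHOT_SPLIT_REGIONS step below adds the parent region's hfile as well, so the snapshot manifest covers the split lineage. The export step implied by the test name (testExportFileSystemStateWithSplitRegion) is not shown in this excerpt; a hedged sketch of how a completed snapshot is commonly copied to another file system with the ExportSnapshot tool, assuming its documented -snapshot / -copy-to options (the destination URI and class name are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copy the snapshot manifest plus all referenced hfiles to another
        // HDFS root; the destination below is only an example URI.
        int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
            "-copy-to", "hdfs://backup-cluster:8020/hbase"});
        System.exit(exitCode);
      }
    }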
2024-12-05T00:58:28,335 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-05T00:58:28,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-12-05T00:58:28,336 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 80fe0601ee18b66bb88f139047761ef4 2024-12-05T00:58:28,336 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 80fe0601ee18b66bb88f139047761ef4 2024-12-05T00:58:28,340 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=28 2024-12-05T00:58:28,340 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 80fe0601ee18b66bb88f139047761ef4 in 189 msec 2024-12-05T00:58:28,340 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T00:58:28,342 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-05T00:58:28,342 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T00:58:28,342 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T00:58:28,344 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_] hfiles 2024-12-05T00:58:28,344 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_ 2024-12-05T00:58:28,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741867_1043 (size=129) 2024-12-05T00:58:28,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741867_1043 (size=129) 2024-12-05T00:58:28,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741867_1043 (size=129) 2024-12-05T00:58:28,359 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot region={ENCODED => 11543fac37630dff35bbfb80eab61340, NAME => 'testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340.', STARTKEY => '', ENDKEY => 
'', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:28,360 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T00:58:28,361 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T00:58:28,362 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:28,363 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:28,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741868_1044 (size=891) 2024-12-05T00:58:28,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741868_1044 (size=891) 2024-12-05T00:58:28,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741868_1044 (size=891) 2024-12-05T00:58:28,384 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T00:58:28,392 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T00:58:28,393 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:28,394 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T00:58:28,394 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion 
table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-05T00:58:28,396 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 267 msec 2024-12-05T00:58:28,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-05T00:58:28,442 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-05T00:58:28,443 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360308442 2024-12-05T00:58:28,443 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37149, tgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360308442, rawTgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360308442, srcFsUri=hdfs://localhost:37149, srcDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T00:58:28,484 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37149, inputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T00:58:28,484 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360308442, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360308442/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:28,489 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
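
The entries above trace the master-side SnapshotProcedure (pid=28) through its states, from SNAPSHOT_SNAPSHOT_SPLIT_REGIONS to SNAPSHOT_POST_OPERATION, until the client logs "Operation: SNAPSHOT ... completed" and the export setup begins. For orientation only, here is a minimal, hedged sketch of the client-side call that normally triggers a FLUSH-type snapshot like this one; the table and snapshot names are taken from the log, but the connection setup is a placeholder and this is not the test's own code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotExample {
  public static void main(String[] args) throws Exception {
    // Assumes a reachable cluster configured via hbase-site.xml on the classpath.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Returns only after the master-side SnapshotProcedure has finished,
      // i.e. after the kind of "Finished pid=..., state=SUCCESS" entry seen above.
      admin.snapshot("snapshot-testExportFileSystemStateWithSplitRegion",
          TableName.valueOf("testExportFileSystemStateWithSplitRegion"));
    }
  }
}
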
2024-12-05T00:58:28,496 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360308442/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T00:58:28,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741869_1045 (size=197) 2024-12-05T00:58:28,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741869_1045 (size=197) 2024-12-05T00:58:28,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741869_1045 (size=197) 2024-12-05T00:58:28,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741870_1046 (size=891) 2024-12-05T00:58:28,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741870_1046 (size=891) 2024-12-05T00:58:28,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741870_1046 (size=891) 2024-12-05T00:58:28,542 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T00:58:28,543 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T00:58:28,543 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T00:58:29,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-8091916142006055572.jar 2024-12-05T00:58:29,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T00:58:29,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T00:58:29,761 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-9754901495450877451.jar 2024-12-05T00:58:29,761 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T00:58:29,762 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T00:58:29,762 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T00:58:29,763 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T00:58:29,763 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T00:58:29,764 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T00:58:29,764 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T00:58:29,765 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T00:58:29,765 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T00:58:29,766 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T00:58:29,767 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T00:58:29,768 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T00:58:29,768 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T00:58:29,768 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T00:58:29,769 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T00:58:29,769 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T00:58:29,770 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T00:58:29,772 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T00:58:29,772 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T00:58:29,773 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T00:58:29,773 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T00:58:29,774 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T00:58:29,774 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T00:58:29,774 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T00:58:29,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741871_1047 (size=24020) 2024-12-05T00:58:29,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741871_1047 (size=24020) 2024-12-05T00:58:29,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741871_1047 (size=24020) 2024-12-05T00:58:29,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741872_1048 (size=77755) 2024-12-05T00:58:29,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741872_1048 (size=77755) 2024-12-05T00:58:29,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741872_1048 (size=77755) 2024-12-05T00:58:30,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741873_1049 (size=131360) 2024-12-05T00:58:30,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741873_1049 (size=131360) 2024-12-05T00:58:30,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741873_1049 (size=131360) 2024-12-05T00:58:30,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741874_1050 (size=111793) 2024-12-05T00:58:30,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741874_1050 (size=111793) 2024-12-05T00:58:30,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741874_1050 (size=111793) 2024-12-05T00:58:30,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741875_1051 (size=1832290) 2024-12-05T00:58:30,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741875_1051 (size=1832290) 2024-12-05T00:58:30,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741875_1051 (size=1832290) 2024-12-05T00:58:30,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741876_1052 (size=8360282) 2024-12-05T00:58:30,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741876_1052 (size=8360282) 
2024-12-05T00:58:30,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741876_1052 (size=8360282) 2024-12-05T00:58:30,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741877_1053 (size=503880) 2024-12-05T00:58:30,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741877_1053 (size=503880) 2024-12-05T00:58:30,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741877_1053 (size=503880) 2024-12-05T00:58:30,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741878_1054 (size=322274) 2024-12-05T00:58:30,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741878_1054 (size=322274) 2024-12-05T00:58:30,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741878_1054 (size=322274) 2024-12-05T00:58:30,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741879_1055 (size=20406) 2024-12-05T00:58:30,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741879_1055 (size=20406) 2024-12-05T00:58:30,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741879_1055 (size=20406) 2024-12-05T00:58:30,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741880_1056 (size=443171) 2024-12-05T00:58:30,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741880_1056 (size=443171) 2024-12-05T00:58:30,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741880_1056 (size=443171) 2024-12-05T00:58:30,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741881_1057 (size=45609) 2024-12-05T00:58:30,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741881_1057 (size=45609) 2024-12-05T00:58:30,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741881_1057 (size=45609) 2024-12-05T00:58:30,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741882_1058 (size=6424744) 2024-12-05T00:58:30,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741882_1058 (size=6424744) 2024-12-05T00:58:30,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741882_1058 (size=6424744) 2024-12-05T00:58:30,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741883_1059 
(size=136454) 2024-12-05T00:58:30,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741883_1059 (size=136454) 2024-12-05T00:58:30,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741883_1059 (size=136454) 2024-12-05T00:58:30,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741884_1060 (size=1597136) 2024-12-05T00:58:30,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741884_1060 (size=1597136) 2024-12-05T00:58:30,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741884_1060 (size=1597136) 2024-12-05T00:58:30,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741885_1061 (size=30873) 2024-12-05T00:58:30,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741885_1061 (size=30873) 2024-12-05T00:58:30,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741885_1061 (size=30873) 2024-12-05T00:58:30,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741886_1062 (size=29229) 2024-12-05T00:58:30,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741886_1062 (size=29229) 2024-12-05T00:58:30,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741886_1062 (size=29229) 2024-12-05T00:58:30,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741887_1063 (size=903858) 2024-12-05T00:58:30,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741887_1063 (size=903858) 2024-12-05T00:58:30,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741887_1063 (size=903858) 2024-12-05T00:58:30,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741888_1064 (size=5175431) 2024-12-05T00:58:30,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741888_1064 (size=5175431) 2024-12-05T00:58:30,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741888_1064 (size=5175431) 2024-12-05T00:58:30,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741889_1065 (size=232881) 2024-12-05T00:58:30,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741889_1065 (size=232881) 2024-12-05T00:58:30,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to 
blk_1073741889_1065 (size=232881) 2024-12-05T00:58:30,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741890_1066 (size=1323991) 2024-12-05T00:58:30,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741890_1066 (size=1323991) 2024-12-05T00:58:30,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741890_1066 (size=1323991) 2024-12-05T00:58:30,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741891_1067 (size=4695811) 2024-12-05T00:58:30,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741891_1067 (size=4695811) 2024-12-05T00:58:30,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741891_1067 (size=4695811) 2024-12-05T00:58:30,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741892_1068 (size=1877034) 2024-12-05T00:58:30,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741892_1068 (size=1877034) 2024-12-05T00:58:30,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741892_1068 (size=1877034) 2024-12-05T00:58:30,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741893_1069 (size=217555) 2024-12-05T00:58:30,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741893_1069 (size=217555) 2024-12-05T00:58:30,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741893_1069 (size=217555) 2024-12-05T00:58:30,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741894_1070 (size=4188619) 2024-12-05T00:58:30,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741894_1070 (size=4188619) 2024-12-05T00:58:30,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741894_1070 (size=4188619) 2024-12-05T00:58:30,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741895_1071 (size=127628) 2024-12-05T00:58:30,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741895_1071 (size=127628) 2024-12-05T00:58:30,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741895_1071 (size=127628) 2024-12-05T00:58:30,782 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
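
The TableMapReduceUtil entries above show ExportSnapshot assembling the classpath for its MapReduce copy job (one dependency jar resolved per class), and the entries that follow cover the job itself: loading the snapshot's hfile list, computing the export split, launching YARN containers, and writing the copied store-file data as new HDFS blocks. As an illustration only, this is roughly how the export tool is driven programmatically; the destination URI and mapper count are placeholders, and the option names follow the tool's documented usage, so verify them against your HBase version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Runs the same MapReduce-backed copy whose container and block-allocation
    // messages appear in this log; "-copy-to" here is a placeholder destination.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
        "-copy-to", "hdfs://backup-namenode:8020/hbase-backup",
        "-mappers", "1"
    });
    System.exit(rc);
  }
}
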
2024-12-05T00:58:30,787 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-12-05T00:58:30,794 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=11543fac37630dff35bbfb80eab61340-36715183efcd44ada3c7a1dfef37fee0_SeqId_4_. 2024-12-05T00:58:30,794 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=11543fac37630dff35bbfb80eab61340-36715183efcd44ada3c7a1dfef37fee0_SeqId_4_. 2024-12-05T00:58:30,794 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-12-05T00:58:30,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741896_1072 (size=244) 2024-12-05T00:58:30,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741896_1072 (size=244) 2024-12-05T00:58:30,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741896_1072 (size=244) 2024-12-05T00:58:30,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741897_1073 (size=17) 2024-12-05T00:58:30,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741897_1073 (size=17) 2024-12-05T00:58:30,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741897_1073 (size=17) 2024-12-05T00:58:31,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741898_1074 (size=304135) 2024-12-05T00:58:31,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741898_1074 (size=304135) 2024-12-05T00:58:31,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741898_1074 (size=304135) 2024-12-05T00:58:31,388 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T00:58:31,388 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T00:58:31,616 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0001_000001 (auth:SIMPLE) from 127.0.0.1:40952 2024-12-05T00:58:32,658 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T00:58:36,695 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-05T00:58:39,207 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0001_000001 (auth:SIMPLE) from 127.0.0.1:45988 2024-12-05T00:58:39,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741899_1075 (size=349833) 2024-12-05T00:58:39,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741899_1075 (size=349833) 2024-12-05T00:58:39,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741899_1075 (size=349833) 2024-12-05T00:58:41,579 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0001_000001 (auth:SIMPLE) from 127.0.0.1:46468 2024-12-05T00:58:52,659 INFO [master/2113c16e5528:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-05T00:58:52,659 INFO [master/2113c16e5528:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-05T00:59:02,747 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ed60f0bae8dcbefaa675e8882731c906, had cached 0 bytes from a total of 5424 2024-12-05T00:59:02,762 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5033a9c1e8deb7792e7f44f66f21f1f0, had cached 0 bytes from a total of 8190 2024-12-05T00:59:06,696 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-05T00:59:11,350 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 29254d26df66b48412cf870ed83cb900 changed from -1.0 to 0.0, refreshing cache 2024-12-05T00:59:11,351 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ed60f0bae8dcbefaa675e8882731c906 changed from -1.0 to 0.0, refreshing cache 2024-12-05T00:59:11,352 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5033a9c1e8deb7792e7f44f66f21f1f0 changed from -1.0 to 0.0, refreshing cache 2024-12-05T00:59:12,689 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 80fe0601ee18b66bb88f139047761ef4, had cached 0 bytes from a total of 320414712 2024-12-05T00:59:12,756 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3163794146e8ea373b4b6cfb860e728f, had cached 0 bytes from a total of 320414712 2024-12-05T00:59:24,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741900_1076 (size=134217728) 2024-12-05T00:59:24,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741900_1076 (size=134217728) 2024-12-05T00:59:24,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741900_1076 (size=134217728) 2024-12-05T00:59:36,696 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T00:59:47,747 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ed60f0bae8dcbefaa675e8882731c906, had cached 0 bytes from a total of 5424 2024-12-05T00:59:47,762 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5033a9c1e8deb7792e7f44f66f21f1f0, had cached 0 bytes from a total of 8190 2024-12-05T00:59:57,690 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 80fe0601ee18b66bb88f139047761ef4, had cached 0 bytes from a total of 320414712 2024-12-05T00:59:57,757 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3163794146e8ea373b4b6cfb860e728f, had cached 0 bytes from a total of 320414712 2024-12-05T01:00:02,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741901_1077 (size=134217728) 2024-12-05T01:00:02,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741901_1077 (size=134217728) 2024-12-05T01:00:02,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741901_1077 (size=134217728) 2024-12-05T01:00:06,696 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-05T01:00:10,464 WARN [regionserver/2113c16e5528:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 7, running: 1 2024-12-05T01:00:16,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741902_1078 (size=51979256) 2024-12-05T01:00:16,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741902_1078 (size=51979256) 2024-12-05T01:00:16,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741902_1078 (size=51979256) 2024-12-05T01:00:16,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741903_1079 (size=17520) 2024-12-05T01:00:16,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741903_1079 (size=17520) 2024-12-05T01:00:16,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741903_1079 (size=17520) 2024-12-05T01:00:16,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741904_1080 (size=482) 2024-12-05T01:00:16,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741904_1080 (size=482) 2024-12-05T01:00:16,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741904_1080 (size=482) 2024-12-05T01:00:16,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741905_1081 (size=17520) 2024-12-05T01:00:16,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741905_1081 (size=17520) 2024-12-05T01:00:16,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741905_1081 (size=17520) 2024-12-05T01:00:16,610 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0001/container_1733360294895_0001_01_000002/launch_container.sh] 2024-12-05T01:00:16,611 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0001/container_1733360294895_0001_01_000002/container_tokens] 2024-12-05T01:00:16,611 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0001/container_1733360294895_0001_01_000002/sysfs] 2024-12-05T01:00:16,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741906_1082 (size=349833) 2024-12-05T01:00:16,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741906_1082 (size=349833) 2024-12-05T01:00:16,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741906_1082 (size=349833) 2024-12-05T01:00:18,112 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T01:00:18,114 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-05T01:00:18,121 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,121 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T01:00:18,122 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T01:00:18,122 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,122 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-05T01:00:18,123 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-05T01:00:18,123 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360308442/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360308442/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,123 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360308442/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-05T01:00:18,123 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360308442/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-05T01:00:18,137 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-05T01:00:18,146 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360418145"}]},"ts":"1733360418145"} 2024-12-05T01:00:18,148 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-05T01:00:18,148 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-05T01:00:18,150 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-12-05T01:00:18,155 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=80fe0601ee18b66bb88f139047761ef4, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3163794146e8ea373b4b6cfb860e728f, UNASSIGN}] 2024-12-05T01:00:18,156 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3163794146e8ea373b4b6cfb860e728f, UNASSIGN 2024-12-05T01:00:18,156 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=80fe0601ee18b66bb88f139047761ef4, UNASSIGN 2024-12-05T01:00:18,158 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=80fe0601ee18b66bb88f139047761ef4, regionState=CLOSING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:00:18,158 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=3163794146e8ea373b4b6cfb860e728f, regionState=CLOSING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:00:18,160 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3163794146e8ea373b4b6cfb860e728f, 
UNASSIGN because future has completed 2024-12-05T01:00:18,160 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:00:18,160 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3163794146e8ea373b4b6cfb860e728f, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:00:18,160 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=80fe0601ee18b66bb88f139047761ef4, UNASSIGN because future has completed 2024-12-05T01:00:18,161 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:00:18,161 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 80fe0601ee18b66bb88f139047761ef4, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:00:18,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-05T01:00:18,313 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close 3163794146e8ea373b4b6cfb860e728f 2024-12-05T01:00:18,314 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:00:18,314 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing 3163794146e8ea373b4b6cfb860e728f, disabling compactions & flushes 2024-12-05T01:00:18,314 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f. 2024-12-05T01:00:18,314 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f. 2024-12-05T01:00:18,314 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f. after waiting 0 ms 2024-12-05T01:00:18,314 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f. 
2024-12-05T01:00:18,319 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/3163794146e8ea373b4b6cfb860e728f/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-05T01:00:18,320 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:00:18,320 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f. 2024-12-05T01:00:18,320 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for 3163794146e8ea373b4b6cfb860e728f: Waiting for close lock at 1733360418314Running coprocessor pre-close hooks at 1733360418314Disabling compacts and flushes for region at 1733360418314Disabling writes for close at 1733360418314Writing region close event to WAL at 1733360418315 (+1 ms)Running coprocessor post-close hooks at 1733360418319 (+4 ms)Closed at 1733360418320 (+1 ms) 2024-12-05T01:00:18,322 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed 3163794146e8ea373b4b6cfb860e728f 2024-12-05T01:00:18,322 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close 80fe0601ee18b66bb88f139047761ef4 2024-12-05T01:00:18,322 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:00:18,322 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing 80fe0601ee18b66bb88f139047761ef4, disabling compactions & flushes 2024-12-05T01:00:18,322 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4. 2024-12-05T01:00:18,322 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4. 2024-12-05T01:00:18,322 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4. after waiting 0 ms 2024-12-05T01:00:18,322 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4. 
2024-12-05T01:00:18,322 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=3163794146e8ea373b4b6cfb860e728f, regionState=CLOSED 2024-12-05T01:00:18,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3163794146e8ea373b4b6cfb860e728f, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:00:18,328 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=34 2024-12-05T01:00:18,328 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure 3163794146e8ea373b4b6cfb860e728f, server=2113c16e5528,32927,1733360288712 in 166 msec 2024-12-05T01:00:18,330 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3163794146e8ea373b4b6cfb860e728f, UNASSIGN in 173 msec 2024-12-05T01:00:18,336 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/80fe0601ee18b66bb88f139047761ef4/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-05T01:00:18,336 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:00:18,337 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4. 
2024-12-05T01:00:18,337 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for 80fe0601ee18b66bb88f139047761ef4: Waiting for close lock at 1733360418322Running coprocessor pre-close hooks at 1733360418322Disabling compacts and flushes for region at 1733360418322Disabling writes for close at 1733360418322Writing region close event to WAL at 1733360418323 (+1 ms)Running coprocessor post-close hooks at 1733360418336 (+13 ms)Closed at 1733360418337 (+1 ms) 2024-12-05T01:00:18,339 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed 80fe0601ee18b66bb88f139047761ef4 2024-12-05T01:00:18,340 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=80fe0601ee18b66bb88f139047761ef4, regionState=CLOSED 2024-12-05T01:00:18,342 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 80fe0601ee18b66bb88f139047761ef4, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:00:18,345 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=33 2024-12-05T01:00:18,345 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure 80fe0601ee18b66bb88f139047761ef4, server=2113c16e5528,32927,1733360288712 in 182 msec 2024-12-05T01:00:18,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=33, resume processing ppid=32 2024-12-05T01:00:18,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=80fe0601ee18b66bb88f139047761ef4, UNASSIGN in 190 msec 2024-12-05T01:00:18,350 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-12-05T01:00:18,350 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 199 msec 2024-12-05T01:00:18,352 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360418352"}]},"ts":"1733360418352"} 2024-12-05T01:00:18,354 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-05T01:00:18,354 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-05T01:00:18,357 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 216 msec 2024-12-05T01:00:18,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-05T01:00:18,462 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 
2024-12-05T01:00:18,465 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,471 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,473 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,476 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:18,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:18,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:18,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:18,483 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340 2024-12-05T01:00:18,483 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-12-05T01:00:18,483 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-12-05T01:00:18,483 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-12-05T01:00:18,483 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-12-05T01:00:18,483 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-05T01:00:18,483 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-05T01:00:18,483 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-05T01:00:18,483 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-05T01:00:18,483 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/80fe0601ee18b66bb88f139047761ef4 2024-12-05T01:00:18,484 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:18,484 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/3163794146e8ea373b4b6cfb860e728f 2024-12-05T01:00:18,485 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:18,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-05T01:00:18,485 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:18,485 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data 
PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:18,487 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/recovered.edits] 2024-12-05T01:00:18,487 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/3163794146e8ea373b4b6cfb860e728f/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/3163794146e8ea373b4b6cfb860e728f/recovered.edits] 2024-12-05T01:00:18,488 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/80fe0601ee18b66bb88f139047761ef4/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/80fe0601ee18b66bb88f139047761ef4/recovered.edits] 2024-12-05T01:00:18,496 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/3163794146e8ea373b4b6cfb860e728f/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_.11543fac37630dff35bbfb80eab61340 to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testExportFileSystemStateWithSplitRegion/3163794146e8ea373b4b6cfb860e728f/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_.11543fac37630dff35bbfb80eab61340 2024-12-05T01:00:18,496 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/80fe0601ee18b66bb88f139047761ef4/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_.11543fac37630dff35bbfb80eab61340 to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testExportFileSystemStateWithSplitRegion/80fe0601ee18b66bb88f139047761ef4/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_.11543fac37630dff35bbfb80eab61340 2024-12-05T01:00:18,496 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_ to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/cf/36715183efcd44ada3c7a1dfef37fee0_SeqId_4_ 2024-12-05T01:00:18,499 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/3163794146e8ea373b4b6cfb860e728f/recovered.edits/10.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testExportFileSystemStateWithSplitRegion/3163794146e8ea373b4b6cfb860e728f/recovered.edits/10.seqid 2024-12-05T01:00:18,500 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/recovered.edits/6.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340/recovered.edits/6.seqid 2024-12-05T01:00:18,500 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/3163794146e8ea373b4b6cfb860e728f 2024-12-05T01:00:18,500 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/80fe0601ee18b66bb88f139047761ef4/recovered.edits/10.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testExportFileSystemStateWithSplitRegion/80fe0601ee18b66bb88f139047761ef4/recovered.edits/10.seqid 2024-12-05T01:00:18,501 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/11543fac37630dff35bbfb80eab61340 2024-12-05T01:00:18,501 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportFileSystemStateWithSplitRegion/80fe0601ee18b66bb88f139047761ef4 2024-12-05T01:00:18,501 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-12-05T01:00:18,504 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32927 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-05T01:00:18,513 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 3 rows of testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-05T01:00:18,517 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-05T01:00:18,518 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,519 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 
2024-12-05T01:00:18,519 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360418519"}]},"ts":"9223372036854775807"} 2024-12-05T01:00:18,519 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360418519"}]},"ts":"9223372036854775807"} 2024-12-05T01:00:18,519 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360418519"}]},"ts":"9223372036854775807"} 2024-12-05T01:00:18,522 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 3 regions from META 2024-12-05T01:00:18,522 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 11543fac37630dff35bbfb80eab61340, NAME => 'testExportFileSystemStateWithSplitRegion,,1733360300446.11543fac37630dff35bbfb80eab61340.', STARTKEY => '', ENDKEY => ''}, {ENCODED => 80fe0601ee18b66bb88f139047761ef4, NAME => 'testExportFileSystemStateWithSplitRegion,,1733360306951.80fe0601ee18b66bb88f139047761ef4.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => 3163794146e8ea373b4b6cfb860e728f, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733360306951.3163794146e8ea373b4b6cfb860e728f.', STARTKEY => '5', ENDKEY => ''}] 2024-12-05T01:00:18,522 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testExportFileSystemStateWithSplitRegion' as deleted. 
2024-12-05T01:00:18,522 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733360418522"}]},"ts":"9223372036854775807"} 2024-12-05T01:00:18,524 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-12-05T01:00:18,525 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,527 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 59 msec 2024-12-05T01:00:18,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-05T01:00:18,592 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,593 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-05T01:00:18,593 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-05T01:00:18,597 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360418597"}]},"ts":"1733360418597"} 2024-12-05T01:00:18,599 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-05T01:00:18,599 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-05T01:00:18,600 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-12-05T01:00:18,602 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ed60f0bae8dcbefaa675e8882731c906, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5033a9c1e8deb7792e7f44f66f21f1f0, UNASSIGN}] 2024-12-05T01:00:18,603 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5033a9c1e8deb7792e7f44f66f21f1f0, UNASSIGN 2024-12-05T01:00:18,603 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ed60f0bae8dcbefaa675e8882731c906, UNASSIGN 2024-12-05T01:00:18,604 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=5033a9c1e8deb7792e7f44f66f21f1f0, regionState=CLOSING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:00:18,604 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=ed60f0bae8dcbefaa675e8882731c906, regionState=CLOSING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:00:18,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5033a9c1e8deb7792e7f44f66f21f1f0, UNASSIGN because future has completed 2024-12-05T01:00:18,606 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:00:18,607 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5033a9c1e8deb7792e7f44f66f21f1f0, server=2113c16e5528,41191,1733360288910}] 2024-12-05T01:00:18,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ed60f0bae8dcbefaa675e8882731c906, UNASSIGN because future has completed 2024-12-05T01:00:18,607 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:00:18,608 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure ed60f0bae8dcbefaa675e8882731c906, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:00:18,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-05T01:00:18,760 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close 5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T01:00:18,760 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:00:18,760 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing 5033a9c1e8deb7792e7f44f66f21f1f0, disabling compactions & flushes 2024-12-05T01:00:18,760 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region 
testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. 2024-12-05T01:00:18,761 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. 2024-12-05T01:00:18,761 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. after waiting 0 ms 2024-12-05T01:00:18,761 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. 2024-12-05T01:00:18,761 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close ed60f0bae8dcbefaa675e8882731c906 2024-12-05T01:00:18,761 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:00:18,761 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing ed60f0bae8dcbefaa675e8882731c906, disabling compactions & flushes 2024-12-05T01:00:18,761 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. 2024-12-05T01:00:18,761 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. 2024-12-05T01:00:18,761 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. after waiting 0 ms 2024-12-05T01:00:18,761 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. 
2024-12-05T01:00:18,765 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:00:18,765 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:00:18,766 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:00:18,766 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:00:18,766 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0. 2024-12-05T01:00:18,766 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906. 2024-12-05T01:00:18,766 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for 5033a9c1e8deb7792e7f44f66f21f1f0: Waiting for close lock at 1733360418760Running coprocessor pre-close hooks at 1733360418760Disabling compacts and flushes for region at 1733360418760Disabling writes for close at 1733360418761 (+1 ms)Writing region close event to WAL at 1733360418762 (+1 ms)Running coprocessor post-close hooks at 1733360418766 (+4 ms)Closed at 1733360418766 2024-12-05T01:00:18,766 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for ed60f0bae8dcbefaa675e8882731c906: Waiting for close lock at 1733360418761Running coprocessor pre-close hooks at 1733360418761Disabling compacts and flushes for region at 1733360418761Disabling writes for close at 1733360418761Writing region close event to WAL at 1733360418762 (+1 ms)Running coprocessor post-close hooks at 1733360418766 (+4 ms)Closed at 1733360418766 2024-12-05T01:00:18,768 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed ed60f0bae8dcbefaa675e8882731c906 2024-12-05T01:00:18,769 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=ed60f0bae8dcbefaa675e8882731c906, regionState=CLOSED 2024-12-05T01:00:18,769 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed 5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T01:00:18,770 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=5033a9c1e8deb7792e7f44f66f21f1f0, regionState=CLOSED 2024-12-05T01:00:18,771 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure ed60f0bae8dcbefaa675e8882731c906, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:00:18,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5033a9c1e8deb7792e7f44f66f21f1f0, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T01:00:18,775 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=40 2024-12-05T01:00:18,775 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure ed60f0bae8dcbefaa675e8882731c906, server=2113c16e5528,32927,1733360288712 in 166 msec 2024-12-05T01:00:18,776 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=41 2024-12-05T01:00:18,776 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ed60f0bae8dcbefaa675e8882731c906, UNASSIGN in 173 msec 2024-12-05T01:00:18,776 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; CloseRegionProcedure 5033a9c1e8deb7792e7f44f66f21f1f0, server=2113c16e5528,41191,1733360288910 in 168 msec 2024-12-05T01:00:18,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=41, resume processing ppid=39 2024-12-05T01:00:18,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5033a9c1e8deb7792e7f44f66f21f1f0, UNASSIGN in 174 msec 2024-12-05T01:00:18,782 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-12-05T01:00:18,782 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 179 msec 2024-12-05T01:00:18,783 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360418783"}]},"ts":"1733360418783"} 2024-12-05T01:00:18,785 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-05T01:00:18,785 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-05T01:00:18,787 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 193 msec 2024-12-05T01:00:18,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-05T01:00:18,912 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, 
Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-05T01:00:18,912 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,914 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,915 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,917 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,920 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906 2024-12-05T01:00:18,920 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T01:00:18,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,922 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-05T01:00:18,922 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-05T01:00:18,925 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-05T01:00:18,925 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-05T01:00:18,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:18,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:18,926 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906/recovered.edits] 2024-12-05T01:00:18,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:18,926 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0/recovered.edits] 2024-12-05T01:00:18,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:18,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-05T01:00:18,932 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906/cf/7527ff86931145ff931f53f4978804af to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906/cf/7527ff86931145ff931f53f4978804af 2024-12-05T01:00:18,933 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0/cf/b33bfc3a3f1d441db35be3bdcc370588 to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0/cf/b33bfc3a3f1d441db35be3bdcc370588 2024-12-05T01:00:18,936 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906/recovered.edits/9.seqid 2024-12-05T01:00:18,936 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0/recovered.edits/9.seqid 2024-12-05T01:00:18,937 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/ed60f0bae8dcbefaa675e8882731c906 2024-12-05T01:00:18,937 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSplitRegion/5033a9c1e8deb7792e7f44f66f21f1f0 2024-12-05T01:00:18,937 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions 2024-12-05T01:00:18,940 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,943 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-05T01:00:18,945 DEBUG [PEWorker-1 {}] 
procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-05T01:00:18,946 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,947 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states. 2024-12-05T01:00:18,947 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360418947"}]},"ts":"9223372036854775807"} 2024-12-05T01:00:18,947 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360418947"}]},"ts":"9223372036854775807"} 2024-12-05T01:00:18,950 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T01:00:18,950 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ed60f0bae8dcbefaa675e8882731c906, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733360297212.ed60f0bae8dcbefaa675e8882731c906.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 5033a9c1e8deb7792e7f44f66f21f1f0, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733360297212.5033a9c1e8deb7792e7f44f66f21f1f0.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T01:00:18,950 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted. 
2024-12-05T01:00:18,950 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733360418950"}]},"ts":"9223372036854775807"} 2024-12-05T01:00:18,952 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META 2024-12-05T01:00:18,953 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:18,955 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 41 msec 2024-12-05T01:00:19,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-05T01:00:19,032 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:19,032 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-05T01:00:19,050 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-05T01:00:19,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:19,055 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-05T01:00:19,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:19,059 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-05T01:00:19,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:19,094 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=755 (was 715) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) 
java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:42000 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/2113c16e5528:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/2113c16e5528:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40627 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 13606) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1371 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1536718241_1 at /127.0.0.1:36920 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:36944 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:48948 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1536718241_1 at /127.0.0.1:41992 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:40627 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/2113c16e5528:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/2113c16e5528:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=795 (was 771) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=514 (was 359) - SystemLoadAverage LEAK? 
-, ProcessCount=20 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=4079 (was 9811) 2024-12-05T01:00:19,094 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=755 is superior to 500 2024-12-05T01:00:19,110 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=755, OpenFileDescriptor=795, MaxFileDescriptor=1048576, SystemLoadAverage=514, ProcessCount=20, AvailableMemoryMB=4077 2024-12-05T01:00:19,110 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=755 is superior to 500 2024-12-05T01:00:19,112 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T01:00:19,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-05T01:00:19,114 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T01:00:19,114 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:00:19,114 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-12-05T01:00:19,115 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T01:00:19,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-05T01:00:19,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741907_1083 (size=406) 2024-12-05T01:00:19,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741907_1083 (size=406) 2024-12-05T01:00:19,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741907_1083 (size=406) 2024-12-05T01:00:19,126 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => f68cb78cbca3f512482602e540a6bd5f, NAME => 'testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:00:19,126 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a41cd4c9195c6ab018dd14b9f1f2c03b, NAME => 'testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:00:19,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741908_1084 (size=67) 2024-12-05T01:00:19,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741908_1084 (size=67) 2024-12-05T01:00:19,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741908_1084 (size=67) 2024-12-05T01:00:19,139 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:00:19,139 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing a41cd4c9195c6ab018dd14b9f1f2c03b, disabling compactions & flushes 2024-12-05T01:00:19,139 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. 2024-12-05T01:00:19,140 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. 2024-12-05T01:00:19,140 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. after waiting 0 ms 2024-12-05T01:00:19,140 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. 2024-12-05T01:00:19,140 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. 
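For reference, the table descriptor logged above (a single column family 'cf' with VERSIONS => '1', created with the two regions [''..'1') and ['1'..'')) corresponds roughly to the following client-side call. This is a minimal sketch against the public HBase Admin API, not the test's own setup code; the class name and connection handling are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Column family 'cf' with a single version, matching VERSIONS => '1' in the log.
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build())
          .build();
      // One split key produces the two regions seen in the log:
      // STARTKEY '' -> ENDKEY '1' and STARTKEY '1' -> ENDKEY ''.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(desc, splitKeys);
    }
  }
}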
2024-12-05T01:00:19,140 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for a41cd4c9195c6ab018dd14b9f1f2c03b: Waiting for close lock at 1733360419139Disabling compacts and flushes for region at 1733360419139Disabling writes for close at 1733360419140 (+1 ms)Writing region close event to WAL at 1733360419140Closed at 1733360419140 2024-12-05T01:00:19,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741909_1085 (size=67) 2024-12-05T01:00:19,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741909_1085 (size=67) 2024-12-05T01:00:19,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741909_1085 (size=67) 2024-12-05T01:00:19,145 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:00:19,145 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing f68cb78cbca3f512482602e540a6bd5f, disabling compactions & flushes 2024-12-05T01:00:19,145 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. 2024-12-05T01:00:19,145 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. 2024-12-05T01:00:19,145 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. after waiting 0 ms 2024-12-05T01:00:19,145 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. 2024-12-05T01:00:19,145 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. 
2024-12-05T01:00:19,145 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for f68cb78cbca3f512482602e540a6bd5f: Waiting for close lock at 1733360419145Disabling compacts and flushes for region at 1733360419145Disabling writes for close at 1733360419145Writing region close event to WAL at 1733360419145Closed at 1733360419145 2024-12-05T01:00:19,146 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T01:00:19,147 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733360419147"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360419147"}]},"ts":"1733360419147"} 2024-12-05T01:00:19,147 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733360419147"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360419147"}]},"ts":"1733360419147"} 2024-12-05T01:00:19,150 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T01:00:19,151 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T01:00:19,151 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360419151"}]},"ts":"1733360419151"} 2024-12-05T01:00:19,153 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-05T01:00:19,153 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {2113c16e5528=0} racks are {/default-rack=0} 2024-12-05T01:00:19,155 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T01:00:19,155 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T01:00:19,155 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T01:00:19,155 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T01:00:19,155 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T01:00:19,155 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T01:00:19,155 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T01:00:19,155 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T01:00:19,155 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T01:00:19,155 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T01:00:19,156 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=a41cd4c9195c6ab018dd14b9f1f2c03b, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f68cb78cbca3f512482602e540a6bd5f, ASSIGN}] 2024-12-05T01:00:19,157 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f68cb78cbca3f512482602e540a6bd5f, ASSIGN 2024-12-05T01:00:19,157 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=a41cd4c9195c6ab018dd14b9f1f2c03b, ASSIGN 2024-12-05T01:00:19,158 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=a41cd4c9195c6ab018dd14b9f1f2c03b, ASSIGN; state=OFFLINE, location=2113c16e5528,32927,1733360288712; forceNewPlan=false, retain=false 2024-12-05T01:00:19,158 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f68cb78cbca3f512482602e540a6bd5f, ASSIGN; state=OFFLINE, location=2113c16e5528,44435,1733360288855; forceNewPlan=false, retain=false 2024-12-05T01:00:19,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-05T01:00:19,309 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-05T01:00:19,309 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=f68cb78cbca3f512482602e540a6bd5f, regionState=OPENING, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:00:19,309 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=a41cd4c9195c6ab018dd14b9f1f2c03b, regionState=OPENING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:00:19,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=a41cd4c9195c6ab018dd14b9f1f2c03b, ASSIGN because future has completed 2024-12-05T01:00:19,312 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure a41cd4c9195c6ab018dd14b9f1f2c03b, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:00:19,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f68cb78cbca3f512482602e540a6bd5f, ASSIGN because future has completed 2024-12-05T01:00:19,314 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure f68cb78cbca3f512482602e540a6bd5f, server=2113c16e5528,44435,1733360288855}] 2024-12-05T01:00:19,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-05T01:00:19,467 DEBUG [RSProcedureDispatcher-pool-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T01:00:19,469 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. 2024-12-05T01:00:19,469 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => a41cd4c9195c6ab018dd14b9f1f2c03b, NAME => 'testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T01:00:19,470 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33117, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T01:00:19,470 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. service=AccessControlService 2024-12-05T01:00:19,470 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T01:00:19,470 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:19,471 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:00:19,471 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:19,471 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:19,472 INFO [StoreOpener-a41cd4c9195c6ab018dd14b9f1f2c03b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:19,473 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. 2024-12-05T01:00:19,473 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => f68cb78cbca3f512482602e540a6bd5f, NAME => 'testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T01:00:19,473 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. service=AccessControlService 2024-12-05T01:00:19,473 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T01:00:19,474 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:19,474 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:00:19,474 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:19,474 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): checking classloading for f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:19,474 INFO [StoreOpener-a41cd4c9195c6ab018dd14b9f1f2c03b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a41cd4c9195c6ab018dd14b9f1f2c03b columnFamilyName cf 2024-12-05T01:00:19,474 DEBUG [StoreOpener-a41cd4c9195c6ab018dd14b9f1f2c03b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:00:19,475 INFO [StoreOpener-a41cd4c9195c6ab018dd14b9f1f2c03b-1 {}] regionserver.HStore(327): Store=a41cd4c9195c6ab018dd14b9f1f2c03b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:00:19,475 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:19,475 INFO [StoreOpener-f68cb78cbca3f512482602e540a6bd5f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:19,476 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:19,476 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:19,476 INFO [StoreOpener-f68cb78cbca3f512482602e540a6bd5f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f68cb78cbca3f512482602e540a6bd5f columnFamilyName cf 2024-12-05T01:00:19,476 DEBUG [StoreOpener-f68cb78cbca3f512482602e540a6bd5f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:00:19,477 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:19,477 INFO [StoreOpener-f68cb78cbca3f512482602e540a6bd5f-1 {}] regionserver.HStore(327): Store=f68cb78cbca3f512482602e540a6bd5f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:00:19,477 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1060): Cleaning up temporary data for a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:19,477 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:19,478 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:19,478 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:19,478 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:19,478 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:19,479 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:19,480 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for 
f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:19,481 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:00:19,481 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened a41cd4c9195c6ab018dd14b9f1f2c03b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73845652, jitterRate=0.10038596391677856}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:00:19,482 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:19,482 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:00:19,482 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for a41cd4c9195c6ab018dd14b9f1f2c03b: Running coprocessor pre-open hook at 1733360419471Writing region info on filesystem at 1733360419471Initializing all the Stores at 1733360419472 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360419472Cleaning up temporary data from old regions at 1733360419477 (+5 ms)Running coprocessor post-open hooks at 1733360419482 (+5 ms)Region opened successfully at 1733360419482 2024-12-05T01:00:19,483 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened f68cb78cbca3f512482602e540a6bd5f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70964006, jitterRate=0.05744609236717224}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:00:19,483 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:19,483 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for f68cb78cbca3f512482602e540a6bd5f: Running coprocessor pre-open hook at 1733360419474Writing region info on filesystem at 1733360419474Initializing all the Stores at 1733360419475 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY 
=> 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360419475Cleaning up temporary data from old regions at 1733360419478 (+3 ms)Running coprocessor post-open hooks at 1733360419483 (+5 ms)Region opened successfully at 1733360419483 2024-12-05T01:00:19,483 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b., pid=48, masterSystemTime=1733360419465 2024-12-05T01:00:19,484 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f., pid=49, masterSystemTime=1733360419467 2024-12-05T01:00:19,485 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. 2024-12-05T01:00:19,485 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. 2024-12-05T01:00:19,486 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=a41cd4c9195c6ab018dd14b9f1f2c03b, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:00:19,486 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. 2024-12-05T01:00:19,486 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. 
2024-12-05T01:00:19,486 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=f68cb78cbca3f512482602e540a6bd5f, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:00:19,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure a41cd4c9195c6ab018dd14b9f1f2c03b, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:00:19,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure f68cb78cbca3f512482602e540a6bd5f, server=2113c16e5528,44435,1733360288855 because future has completed 2024-12-05T01:00:19,490 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=46 2024-12-05T01:00:19,490 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure a41cd4c9195c6ab018dd14b9f1f2c03b, server=2113c16e5528,32927,1733360288712 in 176 msec 2024-12-05T01:00:19,492 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=47 2024-12-05T01:00:19,492 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=a41cd4c9195c6ab018dd14b9f1f2c03b, ASSIGN in 335 msec 2024-12-05T01:00:19,492 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure f68cb78cbca3f512482602e540a6bd5f, server=2113c16e5528,44435,1733360288855 in 176 msec 2024-12-05T01:00:19,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=47, resume processing ppid=45 2024-12-05T01:00:19,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f68cb78cbca3f512482602e540a6bd5f, ASSIGN in 337 msec 2024-12-05T01:00:19,494 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T01:00:19,494 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360419494"}]},"ts":"1733360419494"} 2024-12-05T01:00:19,496 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-05T01:00:19,497 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T01:00:19,497 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-05T01:00:19,499 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 
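The master writes an owner ACL of "jenkins: RWXCA" for the new table as part of CREATE_TABLE_POST_OPERATION, as logged above. Expressing an equivalent grant explicitly through the public AccessControlClient API would look roughly like the sketch below; this is illustrative only and is not how the owner permission is assigned here (that happens automatically inside the create-table procedure).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissionsSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // Grant READ, WRITE, EXEC, CREATE, ADMIN ("RWXCA") on the whole table
      // (null family and qualifier cover every column) to the 'jenkins' user.
      AccessControlClient.grant(connection,
          TableName.valueOf("testtb-testExportWithTargetName"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE,
          Permission.Action.EXEC, Permission.Action.CREATE,
          Permission.Action.ADMIN);
    }
  }
}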
2024-12-05T01:00:19,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:19,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:19,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:19,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:19,506 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:19,506 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:19,506 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:19,506 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:19,507 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 393 msec 2024-12-05T01:00:19,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-05T01:00:19,742 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-05T01:00:19,742 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-12-05T01:00:19,742 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:00:19,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-12-05T01:00:19,747 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:00:19,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithTargetName assigned. 
2024-12-05T01:00:19,747 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-05T01:00:19,750 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-05T01:00:19,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360419750 (current time:1733360419750). 2024-12-05T01:00:19,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:00:19,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-05T01:00:19,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:00:19,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49fdaeb8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:19,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:00:19,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:00:19,752 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:00:19,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:00:19,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:00:19,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a81396c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:19,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:00:19,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:00:19,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:19,754 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:53166, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:00:19,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@219190f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:19,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:00:19,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:00:19,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:00:19,757 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60680, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:00:19,758 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 2024-12-05T01:00:19,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:00:19,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:19,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:19,758 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
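The snapshot request logged above ({ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }) is what a client issues through Admin.snapshot(). A minimal sketch, assuming the standard HBase client API (the class name and connection handling are illustrative, not taken from the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Request a FLUSH-type snapshot of the still-empty table, matching the
      // "type=FLUSH ttl=0" request in the log; the call blocks until the
      // SnapshotProcedure on the master completes.
      admin.snapshot(new SnapshotDescription(
          "emptySnaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"),
          SnapshotType.FLUSH));
    }
  }
}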
2024-12-05T01:00:19,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63b595ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:19,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:00:19,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:00:19,760 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:00:19,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:00:19,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:00:19,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77a2bd4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:19,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:00:19,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:00:19,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:19,761 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53170, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:00:19,762 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@430d3d19, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:19,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:00:19,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:00:19,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:00:19,764 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60690, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-05T01:00:19,766 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:00:19,767 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 2024-12-05T01:00:19,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:00:19,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:19,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:19,767 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:00:19,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-05T01:00:19,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T01:00:19,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-05T01:00:19,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-05T01:00:19,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-05T01:00:19,770 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:00:19,771 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:00:19,774 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:00:19,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741910_1086 (size=167) 2024-12-05T01:00:19,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741910_1086 (size=167) 2024-12-05T01:00:19,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741910_1086 (size=167) 2024-12-05T01:00:19,782 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:00:19,782 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a41cd4c9195c6ab018dd14b9f1f2c03b}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f68cb78cbca3f512482602e540a6bd5f}] 2024-12-05T01:00:19,783 
INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:19,783 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:19,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-05T01:00:19,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32927 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-12-05T01:00:19,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44435 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-12-05T01:00:19,935 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. 2024-12-05T01:00:19,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. 2024-12-05T01:00:19,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for f68cb78cbca3f512482602e540a6bd5f: 2024-12-05T01:00:19,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for a41cd4c9195c6ab018dd14b9f1f2c03b: 2024-12-05T01:00:19,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. for emptySnaptb0-testExportWithTargetName completed. 2024-12-05T01:00:19,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. for emptySnaptb0-testExportWithTargetName completed. 2024-12-05T01:00:19,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-05T01:00:19,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:00:19,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:00:19,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-05T01:00:19,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:00:19,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:00:19,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741911_1087 (size=70) 2024-12-05T01:00:19,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741911_1087 (size=70) 2024-12-05T01:00:19,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741911_1087 (size=70) 2024-12-05T01:00:19,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. 
2024-12-05T01:00:19,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-05T01:00:19,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-12-05T01:00:19,948 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:19,948 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:19,951 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f68cb78cbca3f512482602e540a6bd5f in 167 msec 2024-12-05T01:00:19,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741912_1088 (size=70) 2024-12-05T01:00:19,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741912_1088 (size=70) 2024-12-05T01:00:19,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741912_1088 (size=70) 2024-12-05T01:00:20,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-05T01:00:20,353 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. 
2024-12-05T01:00:20,353 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-12-05T01:00:20,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-12-05T01:00:20,354 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:20,354 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:20,358 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=51, resume processing ppid=50 2024-12-05T01:00:20,358 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a41cd4c9195c6ab018dd14b9f1f2c03b in 574 msec 2024-12-05T01:00:20,358 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:00:20,358 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:00:20,359 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:00:20,359 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-05T01:00:20,360 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-05T01:00:20,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741913_1089 (size=549) 2024-12-05T01:00:20,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741913_1089 (size=549) 2024-12-05T01:00:20,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741913_1089 (size=549) 2024-12-05T01:00:20,373 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:00:20,377 
INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:00:20,378 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-05T01:00:20,379 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:00:20,379 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-05T01:00:20,381 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 611 msec 2024-12-05T01:00:20,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-05T01:00:20,401 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-05T01:00:20,405 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='04e076ef0392fe86d52795388ebed6612', locateType=CURRENT is [region=testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:00:20,406 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='13364a3b6eb3ad8be7780ce57627be385', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f., hostname=2113c16e5528,44435,1733360288855, seqNum=2] 2024-12-05T01:00:20,408 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='2e579bd0a5761b354e3cac44187e82d2e', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f., hostname=2113c16e5528,44435,1733360288855, seqNum=2] 2024-12-05T01:00:20,408 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='3e2cc74ba882a321090edfd0e3f776287', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f., hostname=2113c16e5528,44435,1733360288855, seqNum=2] 2024-12-05T01:00:20,413 DEBUG 
[Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:00:20,413 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:00:20,415 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60880, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:00:20,418 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44435 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:00:20,420 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-05T01:00:20,424 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-05T01:00:20,424 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. 2024-12-05T01:00:20,424 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:00:20,426 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-05T01:00:20,431 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-05T01:00:20,437 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-05T01:00:20,439 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-05T01:00:20,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360420439 (current time:1733360420439). 
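The snapshot request logged just above ({ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }) is the kind of request a client produces through the HBase Admin API. The following is only a minimal sketch of such a client call, not code from the test itself; it assumes an HBase client configuration (hbase-site.xml) on the classpath, and the class and variable names are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative sketch: picks up cluster settings from hbase-site.xml on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Request a FLUSH-type snapshot of the table, matching the
      // { ss=... table=... type=FLUSH } request shown in the log above.
      admin.snapshot("snaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"),
          SnapshotType.FLUSH);
    }
  }
}

The master then validates the request and drives it through the SnapshotProcedure states (SNAPSHOT_PREPARE, SNAPSHOT_PRE_OPERATION, ...) visible in the surrounding log entries.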
2024-12-05T01:00:20,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:00:20,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-05T01:00:20,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:00:20,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@371d472b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:20,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:00:20,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:00:20,441 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:00:20,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:00:20,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:00:20,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b0cbcd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:20,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:00:20,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:00:20,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:20,443 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53180, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:00:20,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63165e25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:20,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:00:20,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:00:20,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:00:20,446 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60696, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:00:20,447 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 2024-12-05T01:00:20,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:00:20,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:20,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:20,447 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T01:00:20,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1832ca01, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:20,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:00:20,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:00:20,449 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:00:20,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:00:20,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:00:20,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36bcf285, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:20,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:00:20,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:00:20,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:20,451 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53190, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:00:20,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68b7ebdd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:20,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:00:20,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:00:20,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:00:20,455 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60704, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-05T01:00:20,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:00:20,459 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 2024-12-05T01:00:20,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:00:20,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:20,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:20,460 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:00:20,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-05T01:00:20,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T01:00:20,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-05T01:00:20,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-05T01:00:20,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-05T01:00:20,467 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:00:20,468 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:00:20,470 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:00:20,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741914_1090 (size=162) 2024-12-05T01:00:20,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741914_1090 (size=162) 2024-12-05T01:00:20,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741914_1090 (size=162) 2024-12-05T01:00:20,478 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:00:20,479 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a41cd4c9195c6ab018dd14b9f1f2c03b}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f68cb78cbca3f512482602e540a6bd5f}] 2024-12-05T01:00:20,480 INFO [PEWorker-3 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:20,480 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:20,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-05T01:00:20,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32927 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-12-05T01:00:20,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44435 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-12-05T01:00:20,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. 2024-12-05T01:00:20,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. 2024-12-05T01:00:20,631 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing a41cd4c9195c6ab018dd14b9f1f2c03b 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-05T01:00:20,631 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing f68cb78cbca3f512482602e540a6bd5f 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-05T01:00:20,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b/.tmp/cf/395fe7c6e9a64ac681dab23bea84af50 is 71, key is 069bd603741425954d768a0955300a40/cf:q/1733360420413/Put/seqid=0 2024-12-05T01:00:20,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f/.tmp/cf/6b202cf7549745c0a1a3ae3169c40a48 is 71, key is 11e3753f982fb75d071a99157cd8a368/cf:q/1733360420417/Put/seqid=0 2024-12-05T01:00:20,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741915_1091 (size=5288) 2024-12-05T01:00:20,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741915_1091 (size=5288) 2024-12-05T01:00:20,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741915_1091 (size=5288) 2024-12-05T01:00:20,666 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741916_1092 (size=8324) 2024-12-05T01:00:20,666 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b/.tmp/cf/395fe7c6e9a64ac681dab23bea84af50 2024-12-05T01:00:20,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741916_1092 (size=8324) 2024-12-05T01:00:20,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741916_1092 (size=8324) 2024-12-05T01:00:20,667 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f/.tmp/cf/6b202cf7549745c0a1a3ae3169c40a48 2024-12-05T01:00:20,675 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b/.tmp/cf/395fe7c6e9a64ac681dab23bea84af50 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b/cf/395fe7c6e9a64ac681dab23bea84af50 2024-12-05T01:00:20,681 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b/cf/395fe7c6e9a64ac681dab23bea84af50, entries=3, sequenceid=6, filesize=5.2 K 2024-12-05T01:00:20,682 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for a41cd4c9195c6ab018dd14b9f1f2c03b in 51ms, sequenceid=6, compaction requested=false 2024-12-05T01:00:20,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-05T01:00:20,683 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for a41cd4c9195c6ab018dd14b9f1f2c03b: 2024-12-05T01:00:20,683 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. for snaptb0-testExportWithTargetName completed. 
2024-12-05T01:00:20,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-05T01:00:20,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:00:20,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b/cf/395fe7c6e9a64ac681dab23bea84af50] hfiles 2024-12-05T01:00:20,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b/cf/395fe7c6e9a64ac681dab23bea84af50 for snapshot=snaptb0-testExportWithTargetName 2024-12-05T01:00:20,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f/.tmp/cf/6b202cf7549745c0a1a3ae3169c40a48 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f/cf/6b202cf7549745c0a1a3ae3169c40a48 2024-12-05T01:00:20,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741917_1093 (size=109) 2024-12-05T01:00:20,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741917_1093 (size=109) 2024-12-05T01:00:20,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741917_1093 (size=109) 2024-12-05T01:00:20,698 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. 
2024-12-05T01:00:20,698 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-05T01:00:20,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-12-05T01:00:20,699 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:20,699 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:20,703 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a41cd4c9195c6ab018dd14b9f1f2c03b in 221 msec 2024-12-05T01:00:20,703 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f/cf/6b202cf7549745c0a1a3ae3169c40a48, entries=47, sequenceid=6, filesize=8.1 K 2024-12-05T01:00:20,704 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for f68cb78cbca3f512482602e540a6bd5f in 73ms, sequenceid=6, compaction requested=false 2024-12-05T01:00:20,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for f68cb78cbca3f512482602e540a6bd5f: 2024-12-05T01:00:20,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. for snaptb0-testExportWithTargetName completed. 2024-12-05T01:00:20,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-05T01:00:20,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:00:20,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f/cf/6b202cf7549745c0a1a3ae3169c40a48] hfiles 2024-12-05T01:00:20,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f/cf/6b202cf7549745c0a1a3ae3169c40a48 for snapshot=snaptb0-testExportWithTargetName 2024-12-05T01:00:20,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741918_1094 (size=109) 2024-12-05T01:00:20,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741918_1094 (size=109) 2024-12-05T01:00:20,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741918_1094 (size=109) 2024-12-05T01:00:20,716 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. 
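Once snaptb0-testExportWithTargetName completes below, the log moves on to the export phase: the Time-limited test thread drives snapshot.ExportSnapshot toward an HDFS destination (export-1733360420782) under the target name testExportWithTargetName. As a rough, hypothetical illustration of that kind of invocation (the destination URI and mapper count here are placeholders, and the exact option set used by the test is not shown in this log), ExportSnapshot can be run as a Hadoop Tool:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copy the snapshot to another HDFS root and give it a new name on the way,
    // which is what the "-target" option is for. The -copy-to URI is a placeholder.
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",
        "-copy-to", "hdfs://namenode:8020/backups/hbase",   // placeholder destination
        "-target", "testExportWithTargetName",
        "-mappers", "1"
    });
    System.exit(exitCode);
  }
}

The renamed export is why the manifest copy later in the log lands under .hbase-snapshot/.tmp/testExportWithTargetName rather than under the source snapshot's own name.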
2024-12-05T01:00:20,716 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-12-05T01:00:20,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-12-05T01:00:20,717 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:20,717 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:20,720 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=53 2024-12-05T01:00:20,720 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f68cb78cbca3f512482602e540a6bd5f in 239 msec 2024-12-05T01:00:20,720 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:00:20,721 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:00:20,721 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:00:20,721 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-05T01:00:20,722 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-05T01:00:20,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741919_1095 (size=627) 2024-12-05T01:00:20,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741919_1095 (size=627) 2024-12-05T01:00:20,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741919_1095 (size=627) 2024-12-05T01:00:20,739 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:00:20,745 INFO [PEWorker-1 {}] 
procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:00:20,746 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-05T01:00:20,747 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:00:20,747 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-05T01:00:20,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 282 msec 2024-12-05T01:00:20,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-05T01:00:20,782 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-05T01:00:20,782 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360420782 2024-12-05T01:00:20,783 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37149, tgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360420782, rawTgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360420782, srcFsUri=hdfs://localhost:37149, srcDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:00:20,830 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37149, inputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:00:20,830 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360420782, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360420782/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-05T01:00:20,834 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status 
and integrity. 2024-12-05T01:00:20,845 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360420782/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-05T01:00:20,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741920_1096 (size=162) 2024-12-05T01:00:20,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741920_1096 (size=162) 2024-12-05T01:00:20,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741921_1097 (size=627) 2024-12-05T01:00:20,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741921_1097 (size=627) 2024-12-05T01:00:20,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741920_1096 (size=162) 2024-12-05T01:00:20,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741921_1097 (size=627) 2024-12-05T01:00:20,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741922_1098 (size=154) 2024-12-05T01:00:20,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741922_1098 (size=154) 2024-12-05T01:00:20,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741922_1098 (size=154) 2024-12-05T01:00:20,892 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:20,892 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:20,893 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:21,939 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-890900669130363824.jar 2024-12-05T01:00:21,940 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:21,940 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:22,009 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-16907539554931422248.jar 2024-12-05T01:00:22,009 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:22,009 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:22,010 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:22,010 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:22,010 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:22,010 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:22,011 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T01:00:22,011 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T01:00:22,011 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T01:00:22,012 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T01:00:22,012 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T01:00:22,012 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T01:00:22,012 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T01:00:22,013 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T01:00:22,013 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T01:00:22,013 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T01:00:22,013 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T01:00:22,014 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:00:22,014 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:00:22,014 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:00:22,014 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:00:22,014 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:00:22,015 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:00:22,015 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:00:22,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741923_1099 (size=24020) 2024-12-05T01:00:22,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741923_1099 (size=24020) 2024-12-05T01:00:22,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741923_1099 (size=24020) 2024-12-05T01:00:22,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741924_1100 (size=77755) 2024-12-05T01:00:22,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741924_1100 (size=77755) 2024-12-05T01:00:22,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741924_1100 (size=77755) 2024-12-05T01:00:22,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741925_1101 (size=131360) 2024-12-05T01:00:22,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741925_1101 (size=131360) 2024-12-05T01:00:22,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741925_1101 (size=131360) 2024-12-05T01:00:22,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741926_1102 (size=111793) 2024-12-05T01:00:22,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741926_1102 (size=111793) 2024-12-05T01:00:22,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741926_1102 (size=111793) 2024-12-05T01:00:22,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741927_1103 (size=1832290) 2024-12-05T01:00:22,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741927_1103 (size=1832290) 2024-12-05T01:00:22,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741927_1103 (size=1832290) 2024-12-05T01:00:22,150 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741928_1104 (size=8360282) 2024-12-05T01:00:22,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741928_1104 (size=8360282) 2024-12-05T01:00:22,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741928_1104 (size=8360282) 2024-12-05T01:00:22,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741929_1105 (size=503880) 2024-12-05T01:00:22,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741929_1105 (size=503880) 2024-12-05T01:00:22,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741929_1105 (size=503880) 2024-12-05T01:00:22,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741930_1106 (size=443171) 2024-12-05T01:00:22,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741930_1106 (size=443171) 2024-12-05T01:00:22,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741930_1106 (size=443171) 2024-12-05T01:00:22,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741931_1107 (size=322274) 2024-12-05T01:00:22,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741931_1107 (size=322274) 2024-12-05T01:00:22,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741931_1107 (size=322274) 2024-12-05T01:00:22,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741932_1108 (size=20406) 2024-12-05T01:00:22,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741932_1108 (size=20406) 2024-12-05T01:00:22,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741932_1108 (size=20406) 2024-12-05T01:00:22,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741933_1109 (size=45609) 2024-12-05T01:00:22,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741933_1109 (size=45609) 2024-12-05T01:00:22,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741933_1109 (size=45609) 2024-12-05T01:00:22,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741934_1110 (size=136454) 2024-12-05T01:00:22,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741934_1110 (size=136454) 2024-12-05T01:00:22,211 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741934_1110 (size=136454) 2024-12-05T01:00:22,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741935_1111 (size=1597136) 2024-12-05T01:00:22,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741935_1111 (size=1597136) 2024-12-05T01:00:22,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741935_1111 (size=1597136) 2024-12-05T01:00:22,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741936_1112 (size=30873) 2024-12-05T01:00:22,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741936_1112 (size=30873) 2024-12-05T01:00:22,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741936_1112 (size=30873) 2024-12-05T01:00:22,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741937_1113 (size=29229) 2024-12-05T01:00:22,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741937_1113 (size=29229) 2024-12-05T01:00:22,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741937_1113 (size=29229) 2024-12-05T01:00:22,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741938_1114 (size=903858) 2024-12-05T01:00:22,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741938_1114 (size=903858) 2024-12-05T01:00:22,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741938_1114 (size=903858) 2024-12-05T01:00:22,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741939_1115 (size=6424744) 2024-12-05T01:00:22,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741939_1115 (size=6424744) 2024-12-05T01:00:22,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741939_1115 (size=6424744) 2024-12-05T01:00:22,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741940_1116 (size=5175431) 2024-12-05T01:00:22,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741940_1116 (size=5175431) 2024-12-05T01:00:22,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741940_1116 (size=5175431) 2024-12-05T01:00:22,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741941_1117 (size=232881) 
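The "For class <name>, using jar <path>" DEBUG lines above are TableMapReduceUtil resolving, for each HBase and third-party class the export job needs, the jar that provides it so the jar can be shipped with the MapReduce job; the long run of addStoredBlock entries that follows is HDFS acknowledging those jars as they are staged. A minimal client-side sketch of the call that produces this kind of output is below; the job name is a placeholder and the exact set of jars resolved depends on the classpath of the run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "dependency-jars-sketch"); // placeholder job name
    // Walks the classes the HBase MapReduce framework needs, finds the jar backing
    // each one, and adds those jars to the job's tmpjars/distributed-cache list;
    // this is the step that logs "For class <name>, using jar <path>" at DEBUG.
    TableMapReduceUtil.addDependencyJars(job);
  }
}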
2024-12-05T01:00:22,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741941_1117 (size=232881) 2024-12-05T01:00:22,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741941_1117 (size=232881) 2024-12-05T01:00:22,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741942_1118 (size=1323991) 2024-12-05T01:00:22,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741942_1118 (size=1323991) 2024-12-05T01:00:22,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741942_1118 (size=1323991) 2024-12-05T01:00:22,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741943_1119 (size=4695811) 2024-12-05T01:00:22,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741943_1119 (size=4695811) 2024-12-05T01:00:22,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741943_1119 (size=4695811) 2024-12-05T01:00:22,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741944_1120 (size=1877034) 2024-12-05T01:00:22,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741944_1120 (size=1877034) 2024-12-05T01:00:22,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741944_1120 (size=1877034) 2024-12-05T01:00:22,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741945_1121 (size=217555) 2024-12-05T01:00:22,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741945_1121 (size=217555) 2024-12-05T01:00:22,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741945_1121 (size=217555) 2024-12-05T01:00:22,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741946_1122 (size=4188619) 2024-12-05T01:00:22,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741946_1122 (size=4188619) 2024-12-05T01:00:22,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741946_1122 (size=4188619) 2024-12-05T01:00:22,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741947_1123 (size=127628) 2024-12-05T01:00:22,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741947_1123 (size=127628) 2024-12-05T01:00:22,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to 
blk_1073741947_1123 (size=127628) 2024-12-05T01:00:22,391 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-05T01:00:22,394 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-05T01:00:22,396 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-05T01:00:22,396 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-05T01:00:22,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741948_1124 (size=445) 2024-12-05T01:00:22,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741948_1124 (size=445) 2024-12-05T01:00:22,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741948_1124 (size=445) 2024-12-05T01:00:22,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741949_1125 (size=21) 2024-12-05T01:00:22,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741949_1125 (size=21) 2024-12-05T01:00:22,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741949_1125 (size=21) 2024-12-05T01:00:22,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741950_1126 (size=304084) 2024-12-05T01:00:22,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741950_1126 (size=304084) 2024-12-05T01:00:22,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741950_1126 (size=304084) 2024-12-05T01:00:22,710 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T01:00:22,710 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T01:00:22,715 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0001_000001 (auth:SIMPLE) from 127.0.0.1:42970 2024-12-05T01:00:22,721 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0001/container_1733360294895_0001_01_000001/launch_container.sh] 2024-12-05T01:00:22,721 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0001/container_1733360294895_0001_01_000001/container_tokens] 2024-12-05T01:00:22,721 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0001/container_1733360294895_0001_01_000001/sysfs] 2024-12-05T01:00:23,200 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0002_000001 (auth:SIMPLE) from 127.0.0.1:47824 2024-12-05T01:00:23,864 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:00:28,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-05T01:00:28,304 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-05T01:00:28,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:28,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T01:00:28,675 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0002_000001 (auth:SIMPLE) from 127.0.0.1:55464 2024-12-05T01:00:28,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741951_1127 (size=349782) 2024-12-05T01:00:29,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741951_1127 (size=349782) 2024-12-05T01:00:29,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741951_1127 (size=349782) 2024-12-05T01:00:30,925 
INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0002_000001 (auth:SIMPLE) from 127.0.0.1:48932 2024-12-05T01:00:30,926 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0002_000001 (auth:SIMPLE) from 127.0.0.1:39986 2024-12-05T01:00:33,808 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:00:34,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741952_1128 (size=8324) 2024-12-05T01:00:34,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741952_1128 (size=8324) 2024-12-05T01:00:34,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741952_1128 (size=8324) 2024-12-05T01:00:35,139 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_0/usercache/jenkins/appcache/application_1733360294895_0002/container_1733360294895_0002_01_000002/launch_container.sh] 2024-12-05T01:00:35,139 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_0/usercache/jenkins/appcache/application_1733360294895_0002/container_1733360294895_0002_01_000002/container_tokens] 2024-12-05T01:00:35,139 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_0/usercache/jenkins/appcache/application_1733360294895_0002/container_1733360294895_0002_01_000002/sysfs] 2024-12-05T01:00:35,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741954_1130 (size=5288) 2024-12-05T01:00:35,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741954_1130 (size=5288) 2024-12-05T01:00:35,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741954_1130 (size=5288) 2024-12-05T01:00:36,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741953_1129 (size=22163) 2024-12-05T01:00:36,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741953_1129 (size=22163) 2024-12-05T01:00:36,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741953_1129 (size=22163) 2024-12-05T01:00:36,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741955_1131 (size=464) 
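The two maximum-am-resource-percent warnings above come from the Capacity Scheduler in the MiniMRCluster: the fraction of queue resources that ApplicationMasters may use is too small for even one AM, so the scheduler logs the condition and skips enforcement. Outside a throwaway test cluster the usual response is to raise the property sketched below; the key is the standard Capacity Scheduler property name, and the value 0.5 is only an illustrative choice (the default is 0.1).

import org.apache.hadoop.conf.Configuration;

public class AmResourcePercentSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Allow ApplicationMasters to use up to 50% of a queue's resources
    // instead of the 10% default that triggers the warning on tiny clusters.
    conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
    System.out.println(conf.get("yarn.scheduler.capacity.maximum-am-resource-percent"));
  }
}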
2024-12-05T01:00:36,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741955_1131 (size=464) 2024-12-05T01:00:36,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741955_1131 (size=464) 2024-12-05T01:00:36,217 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0002/container_1733360294895_0002_01_000003/launch_container.sh] 2024-12-05T01:00:36,217 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0002/container_1733360294895_0002_01_000003/container_tokens] 2024-12-05T01:00:36,217 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0002/container_1733360294895_0002_01_000003/sysfs] 2024-12-05T01:00:36,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741956_1132 (size=22163) 2024-12-05T01:00:36,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741956_1132 (size=22163) 2024-12-05T01:00:36,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741956_1132 (size=22163) 2024-12-05T01:00:36,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741957_1133 (size=349782) 2024-12-05T01:00:36,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741957_1133 (size=349782) 2024-12-05T01:00:36,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741957_1133 (size=349782) 2024-12-05T01:00:36,402 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0002_000001 (auth:SIMPLE) from 127.0.0.1:39990 2024-12-05T01:00:36,696 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T01:00:37,662 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T01:00:37,664 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
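At this point the export itself is done: the snapshot manifest was copied, two map splits (8.1 K and 5.2 K of hfiles) were shipped by the MapReduce job, and ExportSnapshot has finalized the export and is verifying the copy. The test drives the tool programmatically; a minimal sketch of an equivalent invocation follows. ExportSnapshot is a Hadoop Tool, so ToolRunner can run it; the destination path is a placeholder, and the option spellings follow the tool's long-option form, which may differ between HBase versions (check the tool's help output for yours).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copy snapshot 'snaptb0-testExportWithTargetName' to another filesystem,
    // renaming it on the destination; this is the operation the surrounding log records.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testExportWithTargetName",
        "--copy-to", "hdfs://namenode:8020/export-test",  // placeholder destination root
        "--target", "testExportWithTargetName"            // name to export under
    });
    System.exit(rc);
  }
}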
2024-12-05T01:00:37,726 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-12-05T01:00:37,726 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T01:00:37,727 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T01:00:37,727 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-05T01:00:37,728 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-05T01:00:37,728 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-05T01:00:37,728 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360420782/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360420782/.hbase-snapshot/testExportWithTargetName 2024-12-05T01:00:37,729 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360420782/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-05T01:00:37,729 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360420782/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-05T01:00:37,760 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-05T01:00:37,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-05T01:00:37,765 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360437764"}]},"ts":"1733360437764"} 2024-12-05T01:00:37,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-05T01:00:37,767 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-05T01:00:37,768 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-05T01:00:37,769 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-05T01:00:37,771 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=a41cd4c9195c6ab018dd14b9f1f2c03b, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f68cb78cbca3f512482602e540a6bd5f, UNASSIGN}] 2024-12-05T01:00:37,772 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f68cb78cbca3f512482602e540a6bd5f, UNASSIGN 2024-12-05T01:00:37,773 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=a41cd4c9195c6ab018dd14b9f1f2c03b, UNASSIGN 2024-12-05T01:00:37,774 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=f68cb78cbca3f512482602e540a6bd5f, regionState=CLOSING, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:00:37,774 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=a41cd4c9195c6ab018dd14b9f1f2c03b, regionState=CLOSING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:00:37,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f68cb78cbca3f512482602e540a6bd5f, UNASSIGN because future has completed 2024-12-05T01:00:37,777 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:00:37,777 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure f68cb78cbca3f512482602e540a6bd5f, server=2113c16e5528,44435,1733360288855}] 2024-12-05T01:00:37,778 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=a41cd4c9195c6ab018dd14b9f1f2c03b, UNASSIGN because future has completed 2024-12-05T01:00:37,778 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:00:37,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure a41cd4c9195c6ab018dd14b9f1f2c03b, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:00:37,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-05T01:00:37,931 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] 
handler.UnassignRegionHandler(122): Close a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:37,932 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:00:37,932 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing a41cd4c9195c6ab018dd14b9f1f2c03b, disabling compactions & flushes 2024-12-05T01:00:37,932 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. 2024-12-05T01:00:37,932 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. 2024-12-05T01:00:37,932 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. after waiting 0 ms 2024-12-05T01:00:37,932 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. 2024-12-05T01:00:37,941 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(122): Close f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:37,941 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:00:37,941 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing f68cb78cbca3f512482602e540a6bd5f, disabling compactions & flushes 2024-12-05T01:00:37,941 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. 2024-12-05T01:00:37,941 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. 2024-12-05T01:00:37,941 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. after waiting 0 ms 2024-12-05T01:00:37,942 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. 
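The disable request logged above ("Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName") arrives at the master as a single RPC and runs as a DisableTableProcedure (pid=56), which schedules a CloseRegionProcedure per region; the two region closes traced here are those subprocedures. From the client side it is one Admin call; a minimal sketch, with the connection setup purely illustrative, is:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master's DisableTableProcedure completes, i.e. until
      // every region of the table has been closed on its region server.
      admin.disableTable(TableName.valueOf("testtb-testExportWithTargetName"));
    }
  }
}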
2024-12-05T01:00:38,019 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:00:38,020 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:00:38,021 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b. 2024-12-05T01:00:38,021 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for a41cd4c9195c6ab018dd14b9f1f2c03b: Waiting for close lock at 1733360437932Running coprocessor pre-close hooks at 1733360437932Disabling compacts and flushes for region at 1733360437932Disabling writes for close at 1733360437932Writing region close event to WAL at 1733360437960 (+28 ms)Running coprocessor post-close hooks at 1733360438020 (+60 ms)Closed at 1733360438021 (+1 ms) 2024-12-05T01:00:38,024 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:38,028 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=a41cd4c9195c6ab018dd14b9f1f2c03b, regionState=CLOSED 2024-12-05T01:00:38,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure a41cd4c9195c6ab018dd14b9f1f2c03b, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:00:38,038 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=58 2024-12-05T01:00:38,038 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=58, state=SUCCESS, hasLock=false; CloseRegionProcedure a41cd4c9195c6ab018dd14b9f1f2c03b, server=2113c16e5528,32927,1733360288712 in 257 msec 2024-12-05T01:00:38,040 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=a41cd4c9195c6ab018dd14b9f1f2c03b, UNASSIGN in 267 msec 2024-12-05T01:00:38,048 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:00:38,050 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:00:38,050 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f. 
2024-12-05T01:00:38,050 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for f68cb78cbca3f512482602e540a6bd5f: Waiting for close lock at 1733360437941Running coprocessor pre-close hooks at 1733360437941Disabling compacts and flushes for region at 1733360437941Disabling writes for close at 1733360437942 (+1 ms)Writing region close event to WAL at 1733360438006 (+64 ms)Running coprocessor post-close hooks at 1733360438050 (+44 ms)Closed at 1733360438050 2024-12-05T01:00:38,053 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:38,054 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=f68cb78cbca3f512482602e540a6bd5f, regionState=CLOSED 2024-12-05T01:00:38,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure f68cb78cbca3f512482602e540a6bd5f, server=2113c16e5528,44435,1733360288855 because future has completed 2024-12-05T01:00:38,061 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=59 2024-12-05T01:00:38,061 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure f68cb78cbca3f512482602e540a6bd5f, server=2113c16e5528,44435,1733360288855 in 282 msec 2024-12-05T01:00:38,063 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=59, resume processing ppid=57 2024-12-05T01:00:38,063 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f68cb78cbca3f512482602e540a6bd5f, UNASSIGN in 290 msec 2024-12-05T01:00:38,073 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-12-05T01:00:38,073 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 296 msec 2024-12-05T01:00:38,075 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360438074"}]},"ts":"1733360438074"} 2024-12-05T01:00:38,077 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-05T01:00:38,077 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-05T01:00:38,080 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 318 msec 2024-12-05T01:00:38,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-05T01:00:38,082 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-05T01:00:38,082 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-05T01:00:38,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-05T01:00:38,084 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-05T01:00:38,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-05T01:00:38,085 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-05T01:00:38,090 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-05T01:00:38,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-05T01:00:38,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-05T01:00:38,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-05T01:00:38,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-05T01:00:38,095 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-05T01:00:38,096 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-05T01:00:38,096 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-05T01:00:38,096 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-05T01:00:38,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-05T01:00:38,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, 
quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-05T01:00:38,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:38,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-05T01:00:38,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-05T01:00:38,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:38,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:38,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:38,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-05T01:00:38,119 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:38,120 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:38,128 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f/recovered.edits] 2024-12-05T01:00:38,137 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b/recovered.edits] 2024-12-05T01:00:38,141 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f/cf/6b202cf7549745c0a1a3ae3169c40a48 to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f/cf/6b202cf7549745c0a1a3ae3169c40a48 2024-12-05T01:00:38,144 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b/cf/395fe7c6e9a64ac681dab23bea84af50 to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b/cf/395fe7c6e9a64ac681dab23bea84af50 2024-12-05T01:00:38,152 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f/recovered.edits/9.seqid 2024-12-05T01:00:38,155 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/f68cb78cbca3f512482602e540a6bd5f 2024-12-05T01:00:38,158 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b/recovered.edits/9.seqid 2024-12-05T01:00:38,159 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithTargetName/a41cd4c9195c6ab018dd14b9f1f2c03b 2024-12-05T01:00:38,159 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-05T01:00:38,163 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-05T01:00:38,167 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-05T01:00:38,170 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-12-05T01:00:38,172 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-05T01:00:38,172 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 
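The delete request follows the same pattern: a DeleteTableProcedure (pid=62) archives each region's files through HFileArchiver (the "Archived from FileablePath ... to .../archive/..." lines above), removes the regions and the table state from hbase:meta, and drops the table descriptor; the test then asks the master to delete its two snapshots, which appears just below. A client-side sketch of that cleanup, again with illustrative connection setup, is:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TableCleanupSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithTargetName");
      // A table must already be disabled before it can be deleted; the delete
      // archives the region directories rather than removing the hfiles outright.
      admin.deleteTable(table);
      // Remove the snapshots taken during the test, matching the two
      // "delete name: ..." requests the master logs afterwards.
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}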
2024-12-05T01:00:38,172 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360438172"}]},"ts":"9223372036854775807"} 2024-12-05T01:00:38,173 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360438172"}]},"ts":"9223372036854775807"} 2024-12-05T01:00:38,176 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T01:00:38,176 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => a41cd4c9195c6ab018dd14b9f1f2c03b, NAME => 'testtb-testExportWithTargetName,,1733360419111.a41cd4c9195c6ab018dd14b9f1f2c03b.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => f68cb78cbca3f512482602e540a6bd5f, NAME => 'testtb-testExportWithTargetName,1,1733360419111.f68cb78cbca3f512482602e540a6bd5f.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T01:00:38,176 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 2024-12-05T01:00:38,176 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733360438176"}]},"ts":"9223372036854775807"} 2024-12-05T01:00:38,179 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-12-05T01:00:38,180 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-05T01:00:38,181 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 97 msec 2024-12-05T01:00:38,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-05T01:00:38,202 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-12-05T01:00:38,203 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-05T01:00:38,219 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-12-05T01:00:38,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-05T01:00:38,229 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-12-05T01:00:38,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-05T01:00:38,306 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-05T01:00:38,307 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=787 (was 755) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:40749 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:36580 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41563 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44033 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2021 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 16062) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:44033 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-161055371_1 at /127.0.0.1:41124 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-161055371_1 at /127.0.0.1:50240 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40749 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:50262 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38489 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:38489 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=801 (was 795) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=583 (was 514) - SystemLoadAverage LEAK? -, ProcessCount=29 (was 20) - ProcessCount LEAK? -, AvailableMemoryMB=5543 (was 4077) - AvailableMemoryMB LEAK? 
- 2024-12-05T01:00:38,308 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=787 is superior to 500 2024-12-05T01:00:38,353 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=787, OpenFileDescriptor=801, MaxFileDescriptor=1048576, SystemLoadAverage=583, ProcessCount=29, AvailableMemoryMB=5526 2024-12-05T01:00:38,353 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=787 is superior to 500 2024-12-05T01:00:38,357 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T01:00:38,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-05T01:00:38,362 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T01:00:38,362 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:00:38,363 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 63 2024-12-05T01:00:38,363 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T01:00:38,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-05T01:00:38,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-05T01:00:38,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741958_1134 (size=404) 2024-12-05T01:00:38,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741958_1134 (size=404) 2024-12-05T01:00:38,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741958_1134 (size=404) 2024-12-05T01:00:38,526 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7d09f955f3f0039510e79bccb1720562, NAME => 'testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:00:38,539 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 99e1c29b53be5af6eac7ef1de4e1fe65, NAME => 'testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:00:38,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741959_1135 (size=65) 2024-12-05T01:00:38,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741959_1135 (size=65) 2024-12-05T01:00:38,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741959_1135 (size=65) 2024-12-05T01:00:38,674 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:00:38,674 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 7d09f955f3f0039510e79bccb1720562, disabling compactions & flushes 2024-12-05T01:00:38,674 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. 2024-12-05T01:00:38,674 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. 2024-12-05T01:00:38,674 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. after waiting 0 ms 2024-12-05T01:00:38,674 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. 2024-12-05T01:00:38,674 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. 
2024-12-05T01:00:38,674 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7d09f955f3f0039510e79bccb1720562: Waiting for close lock at 1733360438674Disabling compacts and flushes for region at 1733360438674Disabling writes for close at 1733360438674Writing region close event to WAL at 1733360438674Closed at 1733360438674 2024-12-05T01:00:38,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-05T01:00:38,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741960_1136 (size=65) 2024-12-05T01:00:38,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741960_1136 (size=65) 2024-12-05T01:00:38,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741960_1136 (size=65) 2024-12-05T01:00:38,703 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:00:38,703 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 99e1c29b53be5af6eac7ef1de4e1fe65, disabling compactions & flushes 2024-12-05T01:00:38,704 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. 2024-12-05T01:00:38,704 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. 2024-12-05T01:00:38,704 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. after waiting 0 ms 2024-12-05T01:00:38,704 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. 2024-12-05T01:00:38,704 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. 
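At this point the CreateTableProcedure for testtb-testExportWithResetTtl has written the filesystem layout: each of the two regions (split at row key '1') is instantiated once, closed again to record its initial region info, and handed to the CREATE_TABLE_ADD_TO_META step that follows. A sketch of the client request that produces this pre-split table is shown below; the table name, family 'cf', VERSIONS => '1' and the split key are taken from the log, and the wrapper class is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateResetTtlTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)     // VERSIONS => '1' as in the descriptor logged above
              .build())
          .build();
      // A single split key ('1') yields the two regions seen in the log: ['', '1') and ['1', '').
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}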
2024-12-05T01:00:38,704 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 99e1c29b53be5af6eac7ef1de4e1fe65: Waiting for close lock at 1733360438703Disabling compacts and flushes for region at 1733360438703Disabling writes for close at 1733360438704 (+1 ms)Writing region close event to WAL at 1733360438704Closed at 1733360438704 2024-12-05T01:00:38,705 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T01:00:38,706 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733360438705"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360438705"}]},"ts":"1733360438705"} 2024-12-05T01:00:38,706 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733360438705"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360438705"}]},"ts":"1733360438705"} 2024-12-05T01:00:38,712 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T01:00:38,713 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T01:00:38,713 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360438713"}]},"ts":"1733360438713"} 2024-12-05T01:00:38,716 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-05T01:00:38,717 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {2113c16e5528=0} racks are {/default-rack=0} 2024-12-05T01:00:38,718 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T01:00:38,718 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T01:00:38,719 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T01:00:38,719 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T01:00:38,719 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T01:00:38,719 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T01:00:38,719 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T01:00:38,719 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T01:00:38,719 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T01:00:38,719 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T01:00:38,719 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7d09f955f3f0039510e79bccb1720562, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=99e1c29b53be5af6eac7ef1de4e1fe65, ASSIGN}] 2024-12-05T01:00:38,721 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7d09f955f3f0039510e79bccb1720562, ASSIGN 2024-12-05T01:00:38,722 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7d09f955f3f0039510e79bccb1720562, ASSIGN; state=OFFLINE, location=2113c16e5528,41191,1733360288910; forceNewPlan=false, retain=false 2024-12-05T01:00:38,723 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=99e1c29b53be5af6eac7ef1de4e1fe65, ASSIGN 2024-12-05T01:00:38,724 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=99e1c29b53be5af6eac7ef1de4e1fe65, ASSIGN; state=OFFLINE, location=2113c16e5528,32927,1733360288712; forceNewPlan=false, retain=false 2024-12-05T01:00:38,873 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-05T01:00:38,873 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=99e1c29b53be5af6eac7ef1de4e1fe65, regionState=OPENING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:00:38,873 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=7d09f955f3f0039510e79bccb1720562, regionState=OPENING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:00:38,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7d09f955f3f0039510e79bccb1720562, ASSIGN because future has completed 2024-12-05T01:00:38,885 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7d09f955f3f0039510e79bccb1720562, server=2113c16e5528,41191,1733360288910}] 2024-12-05T01:00:38,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=99e1c29b53be5af6eac7ef1de4e1fe65, ASSIGN because future has completed 2024-12-05T01:00:38,887 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 99e1c29b53be5af6eac7ef1de4e1fe65, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:00:39,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-05T01:00:39,046 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. 2024-12-05T01:00:39,046 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => 99e1c29b53be5af6eac7ef1de4e1fe65, NAME => 'testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T01:00:39,046 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. service=AccessControlService 2024-12-05T01:00:39,047 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
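Each region open above registers the AccessControlService coprocessor endpoint ("System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded"), which is also why ACL updates for these tables propagate through the /hbase/acl znode. The configuration that loads it is not visible in this log, so the following is only a rough sketch of the stock HBase security keys involved, written as programmatic Configuration calls even though they would normally live in hbase-site.xml.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SecureClusterConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Enable authorization and load the AccessController on the master,
    // the regionservers and every region (standard secure-cluster settings).
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    System.out.println("authorization=" + conf.get("hbase.security.authorization"));
  }
}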
2024-12-05T01:00:39,047 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:39,047 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:00:39,047 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for 99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:39,047 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for 99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:39,067 INFO [StoreOpener-99e1c29b53be5af6eac7ef1de4e1fe65-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:39,070 INFO [StoreOpener-99e1c29b53be5af6eac7ef1de4e1fe65-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99e1c29b53be5af6eac7ef1de4e1fe65 columnFamilyName cf 2024-12-05T01:00:39,070 DEBUG [StoreOpener-99e1c29b53be5af6eac7ef1de4e1fe65-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:00:39,071 INFO [StoreOpener-99e1c29b53be5af6eac7ef1de4e1fe65-1 {}] regionserver.HStore(327): Store=99e1c29b53be5af6eac7ef1de4e1fe65/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:00:39,071 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for 99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:39,072 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:39,072 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:39,073 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for 99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:39,073 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for 99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:39,075 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for 99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:39,087 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:00:39,087 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1114): Opened 99e1c29b53be5af6eac7ef1de4e1fe65; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62986789, jitterRate=-0.06142370402812958}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:00:39,088 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:39,088 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for 99e1c29b53be5af6eac7ef1de4e1fe65: Running coprocessor pre-open hook at 1733360439047Writing region info on filesystem at 1733360439047Initializing all the Stores at 1733360439049 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360439049Cleaning up temporary data from old regions at 1733360439073 (+24 ms)Running coprocessor post-open hooks at 1733360439088 (+15 ms)Region opened successfully at 1733360439088 2024-12-05T01:00:39,090 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65., pid=67, masterSystemTime=1733360439042 2024-12-05T01:00:39,093 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. 2024-12-05T01:00:39,093 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. 
2024-12-05T01:00:39,094 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=99e1c29b53be5af6eac7ef1de4e1fe65, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:00:39,097 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 99e1c29b53be5af6eac7ef1de4e1fe65, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:00:39,103 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=65 2024-12-05T01:00:39,103 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure 99e1c29b53be5af6eac7ef1de4e1fe65, server=2113c16e5528,32927,1733360288712 in 214 msec 2024-12-05T01:00:39,107 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=99e1c29b53be5af6eac7ef1de4e1fe65, ASSIGN in 384 msec 2024-12-05T01:00:39,112 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. 2024-12-05T01:00:39,112 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => 7d09f955f3f0039510e79bccb1720562, NAME => 'testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T01:00:39,112 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. service=AccessControlService 2024-12-05T01:00:39,113 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T01:00:39,113 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:39,113 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:00:39,113 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for 7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:39,113 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for 7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:39,122 INFO [StoreOpener-7d09f955f3f0039510e79bccb1720562-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:39,124 INFO [StoreOpener-7d09f955f3f0039510e79bccb1720562-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7d09f955f3f0039510e79bccb1720562 columnFamilyName cf 2024-12-05T01:00:39,125 DEBUG [StoreOpener-7d09f955f3f0039510e79bccb1720562-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:00:39,126 INFO [StoreOpener-7d09f955f3f0039510e79bccb1720562-1 {}] regionserver.HStore(327): Store=7d09f955f3f0039510e79bccb1720562/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:00:39,126 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for 7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:39,128 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:39,128 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:39,129 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for 7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:39,129 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1060): Cleaning up temporary data for 7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:39,131 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1093): writing seq id for 7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:39,145 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:00:39,146 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened 7d09f955f3f0039510e79bccb1720562; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73757377, jitterRate=0.09907056391239166}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:00:39,146 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:39,146 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for 7d09f955f3f0039510e79bccb1720562: Running coprocessor pre-open hook at 1733360439113Writing region info on filesystem at 1733360439113Initializing all the Stores at 1733360439115 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360439115Cleaning up temporary data from old regions at 1733360439129 (+14 ms)Running coprocessor post-open hooks at 1733360439146 (+17 ms)Region opened successfully at 1733360439146 2024-12-05T01:00:39,147 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562., pid=66, masterSystemTime=1733360439038 2024-12-05T01:00:39,151 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. 2024-12-05T01:00:39,151 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. 
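The entries above complete the open of region 7d09f955f3f0039510e79bccb1720562; a few entries below, the test utility waits until every region of testtb-testExportWithResetTtl is assigned before proceeding. A minimal client-side sketch of that same check (illustrative code, not taken from the test; connection settings are assumed to come from the local hbase-site.xml):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class WaitForAssignment {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         RegionLocator locator = conn.getRegionLocator(table)) {
      // isTableAvailable returns true once every region of the table is assigned and online.
      while (!admin.isTableAvailable(table)) {
        Thread.sleep(100);
      }
      // Each HRegionLocation pairs a region with the server hosting it, matching the
      // regionLocation= values written to hbase:meta in the entries above.
      locator.getAllRegionLocations().forEach(loc ->
          System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName()));
    }
  }
}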
2024-12-05T01:00:39,153 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=7d09f955f3f0039510e79bccb1720562, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:00:39,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7d09f955f3f0039510e79bccb1720562, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T01:00:39,169 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=64 2024-12-05T01:00:39,169 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure 7d09f955f3f0039510e79bccb1720562, server=2113c16e5528,41191,1733360288910 in 273 msec 2024-12-05T01:00:39,172 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=64, resume processing ppid=63 2024-12-05T01:00:39,172 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7d09f955f3f0039510e79bccb1720562, ASSIGN in 450 msec 2024-12-05T01:00:39,173 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T01:00:39,173 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360439173"}]},"ts":"1733360439173"} 2024-12-05T01:00:39,175 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-05T01:00:39,177 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T01:00:39,177 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-05T01:00:39,181 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-05T01:00:39,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:39,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:39,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:39,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:39,219 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:39,219 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:39,220 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:39,221 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:39,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 863 msec 2024-12-05T01:00:39,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-05T01:00:39,512 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-05T01:00:39,512 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-05T01:00:39,513 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:00:39,523 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-05T01:00:39,524 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:00:39,524 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithResetTtl assigned. 2024-12-05T01:00:39,524 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T01:00:39,529 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-05T01:00:39,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360439529 (current time:1733360439529). 
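The MasterRpcServices entry above records a client snapshot request of { ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }. A hedged sketch of issuing the same request through the public Admin API (illustrative only; the test's own helper code is not shown in this log):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // A FLUSH-type snapshot flushes memstores before capturing store file references;
      // this call is what produces the "type=FLUSH ttl=0" request logged by MasterRpcServices.
      admin.snapshot(new SnapshotDescription(
          "emptySnaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"),
          SnapshotType.FLUSH));
    }
  }
}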
2024-12-05T01:00:39,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:00:39,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-05T01:00:39,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:00:39,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ea87e4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:39,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:00:39,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:00:39,541 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:00:39,542 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:00:39,542 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:00:39,542 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60fcd79f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:39,542 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:00:39,543 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:00:39,543 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:39,545 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55534, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:00:39,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27b80fe7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:39,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:00:39,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:00:39,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:00:39,549 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44748, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:00:39,551 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 2024-12-05T01:00:39,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:00:39,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:39,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:39,552 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T01:00:39,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ed1e420, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:39,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:00:39,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:00:39,559 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:00:39,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:00:39,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:00:39,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dfd9514, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:39,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:00:39,561 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:00:39,561 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:39,562 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55542, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:00:39,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a33cbab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:39,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:00:39,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:00:39,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:00:39,567 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44762, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
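The ClusterIdFetcher, ConnectionRegistryRpcStubHolder and meta-location entries above and below are the normal bootstrap of a short-lived client connection that the master opens internally while validating the snapshot. From application code, roughly the same sequence is driven by a single ConnectionFactory call; a minimal sketch (the quorum address below is the one reported by the ZKWatcher entries earlier in this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class OpenConnection {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:56893");
    // Creating the connection triggers the steps seen in the log: fetch the cluster id from
    // the connection registry, build RPC stubs, and locate hbase:meta before any user request.
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      System.out.println("cluster id: " + conn.getClusterId());
    }
  }
}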
2024-12-05T01:00:39,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:00:39,572 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 2024-12-05T01:00:39,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:00:39,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:39,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:39,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 
{}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-05T01:00:39,572 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:00:39,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T01:00:39,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-05T01:00:39,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-05T01:00:39,575 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:00:39,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-05T01:00:39,577 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:00:39,580 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:00:39,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741961_1137 (size=161) 2024-12-05T01:00:39,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741961_1137 (size=161) 2024-12-05T01:00:39,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741961_1137 (size=161) 2024-12-05T01:00:39,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-05T01:00:39,689 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:00:39,690 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d09f955f3f0039510e79bccb1720562}, 
{pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99e1c29b53be5af6eac7ef1de4e1fe65}] 2024-12-05T01:00:39,692 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:39,695 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:39,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32927 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-05T01:00:39,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. 2024-12-05T01:00:39,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for 99e1c29b53be5af6eac7ef1de4e1fe65: 2024-12-05T01:00:39,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-05T01:00:39,846 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-05T01:00:39,846 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:00:39,846 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:00:39,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41191 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-05T01:00:39,860 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. 2024-12-05T01:00:39,860 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for 7d09f955f3f0039510e79bccb1720562: 2024-12-05T01:00:39,860 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-05T01:00:39,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-05T01:00:39,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:00:39,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:00:39,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-05T01:00:39,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741962_1138 (size=68) 2024-12-05T01:00:39,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741962_1138 (size=68) 2024-12-05T01:00:39,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741962_1138 (size=68) 2024-12-05T01:00:39,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741963_1139 (size=68) 2024-12-05T01:00:39,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741963_1139 (size=68) 2024-12-05T01:00:39,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741963_1139 (size=68) 2024-12-05T01:00:39,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. 2024-12-05T01:00:39,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-05T01:00:39,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. 
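Both SnapshotRegionProcedure subtasks store their region-info and, because the table is still empty, add references for zero hfiles; the entries below consolidate the manifest and move the snapshot out of .tmp. Once the operation completes, a client can confirm the snapshot by name; a small illustrative sketch (same assumed connection settings as the earlier examples):

import java.util.regex.Pattern;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshots {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // List only the snapshots taken against this export test table.
      for (SnapshotDescription sd : admin.listSnapshots(Pattern.compile(".*-testExportWithResetTtl"))) {
        System.out.println(sd.getName() + " table=" + sd.getTableName());
      }
    }
  }
}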
2024-12-05T01:00:39,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-05T01:00:39,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-12-05T01:00:39,944 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:39,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-12-05T01:00:39,944 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:39,944 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:39,945 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:39,948 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 99e1c29b53be5af6eac7ef1de4e1fe65 in 257 msec 2024-12-05T01:00:39,950 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=69, resume processing ppid=68 2024-12-05T01:00:39,950 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7d09f955f3f0039510e79bccb1720562 in 257 msec 2024-12-05T01:00:39,950 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:00:39,951 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:00:39,952 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:00:39,952 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-05T01:00:39,953 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-05T01:00:40,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to 
blk_1073741964_1140 (size=543) 2024-12-05T01:00:40,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741964_1140 (size=543) 2024-12-05T01:00:40,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741964_1140 (size=543) 2024-12-05T01:00:40,065 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:00:40,089 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:00:40,090 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-05T01:00:40,093 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:00:40,093 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-05T01:00:40,095 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 520 msec 2024-12-05T01:00:40,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-05T01:00:40,202 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-05T01:00:40,210 DEBUG [Time-limited test {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='090405f834da2ecb3d82b60f58616c8b4', locateType=CURRENT is [region=testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:00:40,214 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='18e1a89ea65cb58500b6e38f32c015c46', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:00:40,217 DEBUG [Time-limited test {}] 
client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='2d4923041cbb7b50e1072b508d249c3c2', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:00:40,226 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51588, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:00:40,227 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41191 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:00:40,292 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:00:40,294 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T01:00:40,310 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-05T01:00:40,310 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. 2024-12-05T01:00:40,310 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:00:40,316 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T01:00:40,324 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T01:00:40,333 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T01:00:40,344 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-05T01:00:40,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360440344 (current time:1733360440344). 
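The two HRegion(8528) warnings above are logged because the test loads its sample rows with the write-ahead log skipped. An illustrative put that would trigger the same warning (the column family cf and qualifier q match this table's schema; the row key and value below are made up):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPut {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
      Put put = new Put(Bytes.toBytes("row-0001"))  // hypothetical row key
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL bypasses the write-ahead log entirely; the region server then logs the
      // "Data may be lost in the event of a crash" warning seen above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}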
2024-12-05T01:00:40,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:00:40,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-05T01:00:40,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:00:40,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56b459dc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:40,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:00:40,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:00:40,347 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:00:40,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:00:40,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:00:40,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@107d3e23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:40,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:00:40,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:00:40,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:40,351 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55564, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:00:40,352 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30751, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:40,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:00:40,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:00:40,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:00:40,356 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44768, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:00:40,358 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 2024-12-05T01:00:40,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:00:40,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:40,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:40,359 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T01:00:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7162530d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:00:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:00:40,364 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:00:40,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:00:40,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:00:40,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d405aad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:40,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:00:40,366 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:00:40,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:40,367 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55586, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:00:40,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dd2475b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:40,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:00:40,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:00:40,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:00:40,373 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44774, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-05T01:00:40,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:00:40,377 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 2024-12-05T01:00:40,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:00:40,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:40,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:40,378 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:00:40,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-05T01:00:40,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T01:00:40,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-05T01:00:40,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-05T01:00:40,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-05T01:00:40,382 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:00:40,383 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:00:40,386 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:00:40,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741965_1141 (size=156) 2024-12-05T01:00:40,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741965_1141 (size=156) 2024-12-05T01:00:40,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741965_1141 (size=156) 2024-12-05T01:00:40,416 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:00:40,416 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d09f955f3f0039510e79bccb1720562}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99e1c29b53be5af6eac7ef1de4e1fe65}] 2024-12-05T01:00:40,418 INFO [PEWorker-2 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:40,418 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:40,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-05T01:00:40,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41191 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-05T01:00:40,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32927 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-05T01:00:40,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. 2024-12-05T01:00:40,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. 2024-12-05T01:00:40,572 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing 7d09f955f3f0039510e79bccb1720562 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-05T01:00:40,572 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing 99e1c29b53be5af6eac7ef1de4e1fe65 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-05T01:00:40,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65/.tmp/cf/a1ec98bd04934215a7cb5ec1e90358ee is 71, key is 19944b8d456e116ba07c2db800a97e86/cf:q/1733360440291/Put/seqid=0 2024-12-05T01:00:40,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562/.tmp/cf/fca01a6158c749d8badf29259a04de6f is 71, key is 0f13be70f280780ebc5de5c3e7a081f2/cf:q/1733360440227/Put/seqid=0 2024-12-05T01:00:40,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-05T01:00:40,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741966_1142 (size=8394) 2024-12-05T01:00:40,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741966_1142 (size=8394) 2024-12-05T01:00:40,728 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741966_1142 (size=8394) 2024-12-05T01:00:40,729 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65/.tmp/cf/a1ec98bd04934215a7cb5ec1e90358ee 2024-12-05T01:00:40,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65/.tmp/cf/a1ec98bd04934215a7cb5ec1e90358ee as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65/cf/a1ec98bd04934215a7cb5ec1e90358ee 2024-12-05T01:00:40,779 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65/cf/a1ec98bd04934215a7cb5ec1e90358ee, entries=48, sequenceid=6, filesize=8.2 K 2024-12-05T01:00:40,789 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 99e1c29b53be5af6eac7ef1de4e1fe65 in 217ms, sequenceid=6, compaction requested=false 2024-12-05T01:00:40,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-05T01:00:40,791 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for 99e1c29b53be5af6eac7ef1de4e1fe65: 2024-12-05T01:00:40,791 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. for snaptb0-testExportWithResetTtl completed. 2024-12-05T01:00:40,792 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-05T01:00:40,792 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:00:40,792 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65/cf/a1ec98bd04934215a7cb5ec1e90358ee] hfiles 2024-12-05T01:00:40,792 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65/cf/a1ec98bd04934215a7cb5ec1e90358ee for snapshot=snaptb0-testExportWithResetTtl 2024-12-05T01:00:40,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741967_1143 (size=5216) 2024-12-05T01:00:40,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741967_1143 (size=5216) 2024-12-05T01:00:40,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741967_1143 (size=5216) 2024-12-05T01:00:40,802 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562/.tmp/cf/fca01a6158c749d8badf29259a04de6f 2024-12-05T01:00:40,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562/.tmp/cf/fca01a6158c749d8badf29259a04de6f as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562/cf/fca01a6158c749d8badf29259a04de6f 2024-12-05T01:00:40,823 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562/cf/fca01a6158c749d8badf29259a04de6f, entries=2, sequenceid=6, filesize=5.1 K 2024-12-05T01:00:40,824 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 7d09f955f3f0039510e79bccb1720562 in 252ms, sequenceid=6, compaction requested=false 2024-12-05T01:00:40,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for 7d09f955f3f0039510e79bccb1720562: 
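Note: the entries above trace the master-side SnapshotProcedure (pid=71) for the FLUSH-type snapshot snaptb0-testExportWithResetTtl, with the region-server subprocedures (pid=72/73) flushing each region and adding HFile references to the snapshot manifest. As a hedged illustration only (not part of the test source shown in this log), a client typically triggers such a snapshot through the Admin API; table and snapshot names below come from the log, the connection setup is assumed.

    // Illustrative sketch: requesting a FLUSH-type snapshot like the one traced above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master's SnapshotProcedure (SNAPSHOT_PREPARE ->
          // SNAPSHOT_SNAPSHOT_ONLINE_REGIONS -> ... -> SNAPSHOT_COMPLETE_SNAPSHOT) finishes.
          admin.snapshot("snaptb0-testExportWithResetTtl",
              TableName.valueOf("testtb-testExportWithResetTtl"),
              SnapshotType.FLUSH);
        }
      }
    }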
2024-12-05T01:00:40,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. for snaptb0-testExportWithResetTtl completed. 2024-12-05T01:00:40,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-05T01:00:40,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:00:40,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562/cf/fca01a6158c749d8badf29259a04de6f] hfiles 2024-12-05T01:00:40,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562/cf/fca01a6158c749d8badf29259a04de6f for snapshot=snaptb0-testExportWithResetTtl 2024-12-05T01:00:40,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741969_1145 (size=107) 2024-12-05T01:00:40,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741969_1145 (size=107) 2024-12-05T01:00:40,878 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. 
2024-12-05T01:00:40,878 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-05T01:00:40,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741969_1145 (size=107) 2024-12-05T01:00:40,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-12-05T01:00:40,879 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:40,879 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:40,888 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7d09f955f3f0039510e79bccb1720562 in 466 msec 2024-12-05T01:00:40,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741968_1144 (size=107) 2024-12-05T01:00:40,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741968_1144 (size=107) 2024-12-05T01:00:40,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741968_1144 (size=107) 2024-12-05T01:00:40,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. 
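Note: because the snapshot is of type FLUSH, each SnapshotRegionProcedure above first flushes the region's memstore (the DefaultStoreFlusher entries) before referencing the resulting HFiles in the manifest. The effect on the store files is comparable to an explicit client-side flush; a minimal sketch, reusing the admin handle from the previous sketch (illustrative only):

    // Explicit flush of the table, comparable to the per-region flush the
    // FLUSH snapshot performs before it references store files.
    admin.flush(TableName.valueOf("testtb-testExportWithResetTtl"));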
2024-12-05T01:00:40,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-05T01:00:40,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-12-05T01:00:40,901 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:40,901 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:40,908 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-12-05T01:00:40,908 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 99e1c29b53be5af6eac7ef1de4e1fe65 in 486 msec 2024-12-05T01:00:40,908 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:00:40,910 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:00:40,911 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:00:40,911 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-05T01:00:40,914 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-05T01:00:40,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741970_1146 (size=621) 2024-12-05T01:00:40,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741970_1146 (size=621) 2024-12-05T01:00:40,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741970_1146 (size=621) 2024-12-05T01:00:40,935 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:00:40,943 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): 
pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:00:40,943 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-05T01:00:40,945 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:00:40,945 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-05T01:00:40,946 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 566 msec 2024-12-05T01:00:41,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-05T01:00:41,012 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-05T01:00:41,014 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T01:00:41,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-12-05T01:00:41,016 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T01:00:41,017 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:00:41,017 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 74 2024-12-05T01:00:41,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if 
procedure is done pid=74 2024-12-05T01:00:41,018 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T01:00:41,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741971_1147 (size=397) 2024-12-05T01:00:41,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741971_1147 (size=397) 2024-12-05T01:00:41,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741971_1147 (size=397) 2024-12-05T01:00:41,065 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 34551bb942076cf001f64da2ea0637b8, NAME => 'testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:00:41,066 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => d0e1ebb6cb75461009795078449663b3, NAME => 'testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:00:41,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741973_1149 (size=58) 2024-12-05T01:00:41,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741973_1149 (size=58) 2024-12-05T01:00:41,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741973_1149 (size=58) 2024-12-05T01:00:41,101 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:00:41,101 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 34551bb942076cf001f64da2ea0637b8, disabling compactions & flushes 2024-12-05T01:00:41,101 INFO 
[RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. 2024-12-05T01:00:41,101 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. 2024-12-05T01:00:41,101 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. after waiting 0 ms 2024-12-05T01:00:41,101 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. 2024-12-05T01:00:41,101 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. 2024-12-05T01:00:41,101 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 34551bb942076cf001f64da2ea0637b8: Waiting for close lock at 1733360441101Disabling compacts and flushes for region at 1733360441101Disabling writes for close at 1733360441101Writing region close event to WAL at 1733360441101Closed at 1733360441101 2024-12-05T01:00:41,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-05T01:00:41,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741972_1148 (size=58) 2024-12-05T01:00:41,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741972_1148 (size=58) 2024-12-05T01:00:41,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741972_1148 (size=58) 2024-12-05T01:00:41,129 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:00:41,129 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing d0e1ebb6cb75461009795078449663b3, disabling compactions & flushes 2024-12-05T01:00:41,129 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. 2024-12-05T01:00:41,129 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. 2024-12-05T01:00:41,129 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. after waiting 0 ms 2024-12-05T01:00:41,129 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. 
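Note: the CreateTableProcedure above (pid=74) is building testExportWithResetTtl with a single 'cf' family (VERSIONS => '1') and a split at row key '1', which is why two regions with boundaries ''..'1' and '1'..'' are initialized and closed while the filesystem layout is written. A hedged client-side equivalent of that request, using only the attributes visible in the log (all other descriptor settings are left at their defaults here):

    // Sketch: creating a table equivalent to the descriptor logged above
    // (one 'cf' family, max 1 version, pre-split at row key '1').
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void create(Admin admin) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testExportWithResetTtl"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)
                .build())
            .build();
        // One split key => two regions, matching the ''..'1' and '1'..'' regions in the log.
        admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
      }
    }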
2024-12-05T01:00:41,129 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. 2024-12-05T01:00:41,129 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for d0e1ebb6cb75461009795078449663b3: Waiting for close lock at 1733360441129Disabling compacts and flushes for region at 1733360441129Disabling writes for close at 1733360441129Writing region close event to WAL at 1733360441129Closed at 1733360441129 2024-12-05T01:00:41,131 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T01:00:41,131 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733360441131"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360441131"}]},"ts":"1733360441131"} 2024-12-05T01:00:41,131 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733360441131"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360441131"}]},"ts":"1733360441131"} 2024-12-05T01:00:41,135 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T01:00:41,137 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T01:00:41,137 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360441137"}]},"ts":"1733360441137"} 2024-12-05T01:00:41,139 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-05T01:00:41,140 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {2113c16e5528=0} racks are {/default-rack=0} 2024-12-05T01:00:41,142 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T01:00:41,142 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T01:00:41,142 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T01:00:41,142 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T01:00:41,142 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T01:00:41,142 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T01:00:41,142 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T01:00:41,142 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T01:00:41,142 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T01:00:41,142 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T01:00:41,142 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=34551bb942076cf001f64da2ea0637b8, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=d0e1ebb6cb75461009795078449663b3, ASSIGN}] 2024-12-05T01:00:41,144 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=d0e1ebb6cb75461009795078449663b3, ASSIGN 2024-12-05T01:00:41,144 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=34551bb942076cf001f64da2ea0637b8, ASSIGN 2024-12-05T01:00:41,145 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=d0e1ebb6cb75461009795078449663b3, ASSIGN; state=OFFLINE, location=2113c16e5528,44435,1733360288855; forceNewPlan=false, retain=false 2024-12-05T01:00:41,146 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=34551bb942076cf001f64da2ea0637b8, ASSIGN; state=OFFLINE, location=2113c16e5528,32927,1733360288712; forceNewPlan=false, retain=false 2024-12-05T01:00:41,295 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
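Note: at this point the balancer has produced an assignment plan and the two TransitRegionStateProcedures (pid=75/76) are moving the new regions toward OPEN. From a client, the simplest check that the freshly created table is fully online is Admin#isTableAvailable; a one-line sketch, reusing the admin handle from the earlier sketch (polling and error handling omitted):

    // True once every region of the new table has been assigned and opened.
    boolean available = admin.isTableAvailable(TableName.valueOf("testExportWithResetTtl"));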
2024-12-05T01:00:41,297 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=d0e1ebb6cb75461009795078449663b3, regionState=OPENING, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:00:41,297 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=34551bb942076cf001f64da2ea0637b8, regionState=OPENING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:00:41,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=34551bb942076cf001f64da2ea0637b8, ASSIGN because future has completed 2024-12-05T01:00:41,300 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 34551bb942076cf001f64da2ea0637b8, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:00:41,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=d0e1ebb6cb75461009795078449663b3, ASSIGN because future has completed 2024-12-05T01:00:41,301 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure d0e1ebb6cb75461009795078449663b3, server=2113c16e5528,44435,1733360288855}] 2024-12-05T01:00:41,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-05T01:00:41,460 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. 2024-12-05T01:00:41,460 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => d0e1ebb6cb75461009795078449663b3, NAME => 'testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T01:00:41,461 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. service=AccessControlService 2024-12-05T01:00:41,461 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T01:00:41,461 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:41,461 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:00:41,462 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:41,462 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:41,464 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. 2024-12-05T01:00:41,464 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => 34551bb942076cf001f64da2ea0637b8, NAME => 'testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T01:00:41,465 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. service=AccessControlService 2024-12-05T01:00:41,465 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
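Note: the "Registered coprocessor service ... service=AccessControlService" and "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" entries above come from the access-control coprocessor being attached to each opening region. A sketch of how that coprocessor is usually enabled in the configuration, assuming a standard secure-HBase setup (the property names are the documented ones; this is illustrative, not the exact configuration of this test cluster):

    // Enabling the AccessController coprocessor seen in the region-open entries above.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.security.authorization", "true");
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");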
2024-12-05T01:00:41,465 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:41,465 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:00:41,465 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for 34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:41,465 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for 34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:41,472 INFO [StoreOpener-d0e1ebb6cb75461009795078449663b3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:41,473 INFO [StoreOpener-34551bb942076cf001f64da2ea0637b8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:41,475 INFO [StoreOpener-d0e1ebb6cb75461009795078449663b3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d0e1ebb6cb75461009795078449663b3 columnFamilyName cf 2024-12-05T01:00:41,475 DEBUG [StoreOpener-d0e1ebb6cb75461009795078449663b3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:00:41,475 INFO [StoreOpener-34551bb942076cf001f64da2ea0637b8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 34551bb942076cf001f64da2ea0637b8 
columnFamilyName cf 2024-12-05T01:00:41,475 DEBUG [StoreOpener-34551bb942076cf001f64da2ea0637b8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:00:41,475 INFO [StoreOpener-d0e1ebb6cb75461009795078449663b3-1 {}] regionserver.HStore(327): Store=d0e1ebb6cb75461009795078449663b3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:00:41,476 INFO [StoreOpener-34551bb942076cf001f64da2ea0637b8-1 {}] regionserver.HStore(327): Store=34551bb942076cf001f64da2ea0637b8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:00:41,476 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1038): replaying wal for d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:41,476 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1038): replaying wal for 34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:41,477 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:41,477 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:41,477 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:41,478 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:41,478 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:41,478 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for 34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:41,478 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1060): Cleaning up temporary data for d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:41,478 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for 34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:41,481 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for 
34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:41,482 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1093): writing seq id for d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:41,488 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:00:41,489 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened 34551bb942076cf001f64da2ea0637b8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71138231, jitterRate=0.06004224717617035}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:00:41,489 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:41,490 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for 34551bb942076cf001f64da2ea0637b8: Running coprocessor pre-open hook at 1733360441465Writing region info on filesystem at 1733360441466 (+1 ms)Initializing all the Stores at 1733360441466Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360441466Cleaning up temporary data from old regions at 1733360441478 (+12 ms)Running coprocessor post-open hooks at 1733360441489 (+11 ms)Region opened successfully at 1733360441490 (+1 ms) 2024-12-05T01:00:41,492 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8., pid=77, masterSystemTime=1733360441452 2024-12-05T01:00:41,495 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. 2024-12-05T01:00:41,495 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. 
2024-12-05T01:00:41,495 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=34551bb942076cf001f64da2ea0637b8, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:00:41,503 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 34551bb942076cf001f64da2ea0637b8, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:00:41,506 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:00:41,506 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened d0e1ebb6cb75461009795078449663b3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72609880, jitterRate=0.08197152614593506}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:00:41,507 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:41,507 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for d0e1ebb6cb75461009795078449663b3: Running coprocessor pre-open hook at 1733360441462Writing region info on filesystem at 1733360441462Initializing all the Stores at 1733360441463 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360441463Cleaning up temporary data from old regions at 1733360441478 (+15 ms)Running coprocessor post-open hooks at 1733360441507 (+29 ms)Region opened successfully at 1733360441507 2024-12-05T01:00:41,508 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3., pid=78, masterSystemTime=1733360441455 2024-12-05T01:00:41,508 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=75 2024-12-05T01:00:41,508 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure 34551bb942076cf001f64da2ea0637b8, server=2113c16e5528,32927,1733360288712 in 204 msec 2024-12-05T01:00:41,512 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. 
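Note: the CREATE_TABLE_POST_OPERATION step just below writes the creating user's RWXCA permission for testExportWithResetTtl into hbase:acl and fans it out through the /hbase/acl ZooKeeper node. Granting such a table-level permission explicitly from a client would look roughly like the sketch below (user name and table name are taken from the log; everything else is assumed):

    // Illustrative only: an explicit grant equivalent to the RWXCA ACL entry
    // the master writes for the table owner in the entries that follow.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantSketch {
      public static void grantOwner(Connection conn) throws Throwable {
        AccessControlClient.grant(conn, TableName.valueOf("testExportWithResetTtl"),
            "jenkins", null, null,   // null family/qualifier => table-level grant
            Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
            Permission.Action.CREATE, Permission.Action.ADMIN);
      }
    }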
2024-12-05T01:00:41,512 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. 2024-12-05T01:00:41,512 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=34551bb942076cf001f64da2ea0637b8, ASSIGN in 366 msec 2024-12-05T01:00:41,513 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=d0e1ebb6cb75461009795078449663b3, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:00:41,516 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure d0e1ebb6cb75461009795078449663b3, server=2113c16e5528,44435,1733360288855 because future has completed 2024-12-05T01:00:41,520 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=76 2024-12-05T01:00:41,520 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure d0e1ebb6cb75461009795078449663b3, server=2113c16e5528,44435,1733360288855 in 216 msec 2024-12-05T01:00:41,523 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=76, resume processing ppid=74 2024-12-05T01:00:41,523 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=d0e1ebb6cb75461009795078449663b3, ASSIGN in 378 msec 2024-12-05T01:00:41,524 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T01:00:41,524 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360441524"}]},"ts":"1733360441524"} 2024-12-05T01:00:41,527 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-05T01:00:41,528 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T01:00:41,528 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-05T01:00:41,532 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-05T01:00:41,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:41,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:41,535 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:41,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:41,543 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:41,543 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:41,544 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:41,544 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:41,544 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:41,544 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:41,545 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:41,545 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:41,549 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 528 msec 2024-12-05T01:00:41,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-05T01:00:41,642 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-12-05T01:00:41,643 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportWithResetTtl get assigned. 
Timeout = 60000ms 2024-12-05T01:00:41,643 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:00:41,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-05T01:00:41,648 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:00:41,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportWithResetTtl assigned. 2024-12-05T01:00:41,649 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T01:00:41,656 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='06d1b3ae121d6dff751c0e4787be4324a', locateType=CURRENT is [region=testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:00:41,657 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='15c1447cb2e1f1e0d8283deb7e9f3c8b8', locateType=CURRENT is [region=testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3., hostname=2113c16e5528,44435,1733360288855, seqNum=2] 2024-12-05T01:00:41,658 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='2b2c6bd84dcd9e3573e6dae06e658faf9', locateType=CURRENT is [region=testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3., hostname=2113c16e5528,44435,1733360288855, seqNum=2] 2024-12-05T01:00:41,659 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='3fa6465691c629cb0a7ee4dbe3a155168', locateType=CURRENT is [region=testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3., hostname=2113c16e5528,44435,1733360288855, seqNum=2] 2024-12-05T01:00:41,659 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='4e2a9d09cf850ba617e310ec292f8ec84', locateType=CURRENT is [region=testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3., hostname=2113c16e5528,44435,1733360288855, seqNum=2] 2024-12-05T01:00:41,664 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:00:41,668 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44435 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. with WAL disabled. Data may be lost in the event of a crash. 
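Note: the two "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above are what a region server emits when a client mutation arrives with WAL durability skipped. Below is a minimal client-side sketch, assuming the standard HBase client API, of the kind of write that produces that warning; the table name and column family/qualifier (cf:q) come from the log, while the row key, value, and class name are invented for illustration and this is not the test's actual code.

    // Sketch only (illustrative, not the test's source): a Put with WAL durability skipped.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testExportWithResetTtl"))) {
          Put put = new Put(Bytes.toBytes("row-0"));  // hypothetical row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);     // skipping the WAL is what triggers the warning
          table.put(put);
        }
      }
    }

SKIP_WAL trades durability for write speed, which is why the server notes that data may be lost on a crash.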
2024-12-05T01:00:41,670 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T01:00:41,672 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-05T01:00:41,672 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. 2024-12-05T01:00:41,672 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:00:41,674 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T01:00:41,682 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T01:00:41,688 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T01:00:41,691 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-05T01:00:41,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360441691 (current time:1733360441691). 
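The request line above shows the master receiving a flush-type snapshot with a TTL ({ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }). A minimal sketch of how such a request is typically issued through the Admin API follows, assuming the three-argument SnapshotDescription constructor of recent client versions; it is illustrative only and does not reproduce how the test sets the TTL.

    // Sketch only: request a FLUSH-type snapshot of the table named in the log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class FlushSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A FLUSH snapshot flushes each region's memstore before referencing its hfiles,
          // matching the flush + SnapshotRegionProcedure activity that follows in the log.
          admin.snapshot(new SnapshotDescription(
              "snaptb-testExportWithResetTtl",
              TableName.valueOf("testExportWithResetTtl"),
              SnapshotType.FLUSH));
          // The ttl=100000 in the request is a snapshot TTL; the mechanism the test uses
          // to set it (snapshot properties) is deliberately not reproduced here.
        }
      }
    }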
2024-12-05T01:00:41,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-05T01:00:41,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:00:41,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d91c28c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:41,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:00:41,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:00:41,695 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:00:41,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:00:41,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:00:41,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74c0b407, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:41,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:00:41,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:00:41,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:41,697 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40276, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:00:41,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56db655c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:41,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:00:41,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:00:41,699 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-05T01:00:41,700 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42440, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-05T01:00:41,701 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369.
2024-12-05T01:00:41,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack:
at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-05T01:00:41,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T01:00:41,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T01:00:41,702 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-05T01:00:41,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cf244a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:41,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:00:41,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:00:41,704 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:00:41,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:00:41,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:00:41,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a362db4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:41,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:00:41,705 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:00:41,705 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:00:41,713 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40282, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:00:41,714 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3392dd73, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:00:41,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:00:41,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:00:41,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:00:41,717 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42452, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-05T01:00:41,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2]
2024-12-05T01:00:41,721 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369.
2024-12-05T01:00:41,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack:
at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:568)
at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-05T01:00:41,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T01:00:41,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T01:00:41,721 INFO [Registry-endpoints-refresh-end-points {}]
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:00:41,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-05T01:00:41,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T01:00:41,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-05T01:00:41,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-05T01:00:41,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-05T01:00:41,724 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:00:41,725 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:00:41,728 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:00:41,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741974_1150 (size=143) 2024-12-05T01:00:41,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741974_1150 (size=143) 2024-12-05T01:00:41,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741974_1150 (size=143) 2024-12-05T01:00:41,739 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:00:41,739 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 34551bb942076cf001f64da2ea0637b8}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d0e1ebb6cb75461009795078449663b3}] 2024-12-05T01:00:41,740 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:41,740 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:41,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-05T01:00:41,892 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44435 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-12-05T01:00:41,892 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32927 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-12-05T01:00:41,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. 2024-12-05T01:00:41,893 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing 34551bb942076cf001f64da2ea0637b8 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-05T01:00:41,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. 2024-12-05T01:00:41,894 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing d0e1ebb6cb75461009795078449663b3 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-05T01:00:41,897 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-05T01:00:41,914 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3/.tmp/cf/c1cd992170a848d2816dd36c73b0591e is 71, key is 11a0bbb15f501d4850ef6c79272374a2/cf:q/1733360441668/Put/seqid=0 2024-12-05T01:00:41,920 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8/.tmp/cf/b9c86a99e4df401ea94123d80539bd29 is 71, key is 0624748880ad686cc00b6ebc9ffa176e/cf:q/1733360441664/Put/seqid=0 2024-12-05T01:00:41,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741975_1151 (size=8256) 2024-12-05T01:00:41,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741975_1151 (size=8256) 2024-12-05T01:00:41,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to 
blk_1073741975_1151 (size=8256) 2024-12-05T01:00:41,933 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3/.tmp/cf/c1cd992170a848d2816dd36c73b0591e 2024-12-05T01:00:41,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3/.tmp/cf/c1cd992170a848d2816dd36c73b0591e as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3/cf/c1cd992170a848d2816dd36c73b0591e 2024-12-05T01:00:41,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741976_1152 (size=5354) 2024-12-05T01:00:41,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741976_1152 (size=5354) 2024-12-05T01:00:41,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741976_1152 (size=5354) 2024-12-05T01:00:41,942 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8/.tmp/cf/b9c86a99e4df401ea94123d80539bd29 2024-12-05T01:00:41,945 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3/cf/c1cd992170a848d2816dd36c73b0591e, entries=46, sequenceid=5, filesize=8.1 K 2024-12-05T01:00:41,946 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for d0e1ebb6cb75461009795078449663b3 in 53ms, sequenceid=5, compaction requested=false 2024-12-05T01:00:41,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for d0e1ebb6cb75461009795078449663b3: 2024-12-05T01:00:41,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. for snaptb-testExportWithResetTtl completed. 2024-12-05T01:00:41,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-05T01:00:41,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:00:41,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3/cf/c1cd992170a848d2816dd36c73b0591e] hfiles 2024-12-05T01:00:41,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3/cf/c1cd992170a848d2816dd36c73b0591e for snapshot=snaptb-testExportWithResetTtl 2024-12-05T01:00:41,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8/.tmp/cf/b9c86a99e4df401ea94123d80539bd29 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8/cf/b9c86a99e4df401ea94123d80539bd29 2024-12-05T01:00:41,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741977_1153 (size=100) 2024-12-05T01:00:41,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741977_1153 (size=100) 2024-12-05T01:00:41,954 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8/cf/b9c86a99e4df401ea94123d80539bd29, entries=4, sequenceid=5, filesize=5.2 K 2024-12-05T01:00:41,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741977_1153 (size=100) 2024-12-05T01:00:41,955 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. 
2024-12-05T01:00:41,955 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-12-05T01:00:41,955 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 34551bb942076cf001f64da2ea0637b8 in 62ms, sequenceid=5, compaction requested=false 2024-12-05T01:00:41,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-12-05T01:00:41,955 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for 34551bb942076cf001f64da2ea0637b8: 2024-12-05T01:00:41,955 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:41,955 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. for snaptb-testExportWithResetTtl completed. 2024-12-05T01:00:41,956 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:41,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-05T01:00:41,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:00:41,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8/cf/b9c86a99e4df401ea94123d80539bd29] hfiles 2024-12-05T01:00:41,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8/cf/b9c86a99e4df401ea94123d80539bd29 for snapshot=snaptb-testExportWithResetTtl 2024-12-05T01:00:41,958 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d0e1ebb6cb75461009795078449663b3 in 217 msec 2024-12-05T01:00:41,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741978_1154 (size=100) 2024-12-05T01:00:41,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741978_1154 (size=100) 2024-12-05T01:00:41,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741978_1154 (size=100) 2024-12-05T01:00:41,963 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. 
2024-12-05T01:00:41,963 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-05T01:00:41,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-12-05T01:00:41,964 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:41,964 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:41,966 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=80, resume processing ppid=79 2024-12-05T01:00:41,966 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 34551bb942076cf001f64da2ea0637b8 in 226 msec 2024-12-05T01:00:41,966 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:00:41,967 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:00:41,968 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:00:41,968 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-05T01:00:41,968 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-05T01:00:41,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741979_1155 (size=600) 2024-12-05T01:00:41,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741979_1155 (size=600) 2024-12-05T01:00:41,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741979_1155 (size=600) 2024-12-05T01:00:41,986 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:00:41,991 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, 
state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:00:41,991 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-05T01:00:41,993 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:00:41,993 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-05T01:00:41,994 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 271 msec 2024-12-05T01:00:42,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-05T01:00:42,042 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-12-05T01:00:42,052 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360442051 2024-12-05T01:00:42,052 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37149, tgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360442051, rawTgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360442051, srcFsUri=hdfs://localhost:37149, srcDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:00:42,094 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37149, inputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:00:42,094 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360442051, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360442051/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-05T01:00:42,096 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
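The export destination and "Copy Snapshot Manifest" lines around this point come from the ExportSnapshot tool copying the completed snapshot to another HDFS location. A minimal sketch of driving that tool programmatically follows, assuming its documented -snapshot/-copy-to arguments; the target URI below is a placeholder, not the path used in this run.

    // Sketch only: run ExportSnapshot the way the command line
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <uri>
    // would, via ToolRunner.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb-testExportWithResetTtl",
            "-copy-to", "hdfs://target-cluster:8020/hbase-export"  // placeholder destination
        });
        System.exit(rc);
      }
    }

The tool first verifies the source snapshot's expiration status and integrity and copies the snapshot manifest, which is exactly the sequence logged here, before launching a MapReduce job to copy the referenced hfiles.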
2024-12-05T01:00:42,101 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360442051/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-05T01:00:42,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741981_1157 (size=143) 2024-12-05T01:00:42,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741981_1157 (size=143) 2024-12-05T01:00:42,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741981_1157 (size=143) 2024-12-05T01:00:42,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741980_1156 (size=600) 2024-12-05T01:00:42,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741980_1156 (size=600) 2024-12-05T01:00:42,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741980_1156 (size=600) 2024-12-05T01:00:42,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741982_1158 (size=141) 2024-12-05T01:00:42,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741982_1158 (size=141) 2024-12-05T01:00:42,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741982_1158 (size=141) 2024-12-05T01:00:42,138 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:42,138 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:42,139 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:42,507 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0002_000001 (auth:SIMPLE) from 127.0.0.1:32992 2024-12-05T01:00:42,520 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_1/usercache/jenkins/appcache/application_1733360294895_0002/container_1733360294895_0002_01_000001/launch_container.sh] 2024-12-05T01:00:42,520 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete 
returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_1/usercache/jenkins/appcache/application_1733360294895_0002/container_1733360294895_0002_01_000001/container_tokens] 2024-12-05T01:00:42,520 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_1/usercache/jenkins/appcache/application_1733360294895_0002/container_1733360294895_0002_01_000001/sysfs] 2024-12-05T01:00:43,141 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-8880523278579909683.jar 2024-12-05T01:00:43,142 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:43,142 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:43,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-10957147204167075794.jar 2024-12-05T01:00:43,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:43,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:43,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:43,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:43,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:43,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:00:43,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T01:00:43,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T01:00:43,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T01:00:43,212 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T01:00:43,212 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T01:00:43,212 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T01:00:43,212 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T01:00:43,213 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T01:00:43,213 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T01:00:43,213 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T01:00:43,213 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T01:00:43,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:00:43,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:00:43,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:00:43,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:00:43,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:00:43,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:00:43,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:00:43,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741983_1159 (size=24020) 2024-12-05T01:00:43,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741983_1159 (size=24020) 2024-12-05T01:00:43,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741983_1159 (size=24020) 2024-12-05T01:00:43,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741984_1160 (size=77755) 2024-12-05T01:00:43,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741984_1160 (size=77755) 2024-12-05T01:00:43,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741984_1160 (size=77755) 2024-12-05T01:00:43,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741985_1161 (size=131360) 
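The long run of "For class X, using jar Y" lines above is TableMapReduceUtil resolving each dependency class to its containing jar so those jars can be shipped with the MapReduce job that performs the copy. A minimal sketch of the call that produces this kind of resolution, assuming a plain MapReduce Job object (illustrative only; the job name is hypothetical):

    // Sketch only: attach HBase/Hadoop dependency jars to a MapReduce job, which logs
    // "For class <name>, using jar <path>" for each resolved dependency.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-sketch");  // hypothetical job name
        TableMapReduceUtil.addDependencyJars(job);  // resolves jars and adds them to the job classpath
      }
    }

Each resolved jar is staged for the job, which is consistent with the block reports that follow as those jars are written to HDFS.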
2024-12-05T01:00:43,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741985_1161 (size=131360) 2024-12-05T01:00:43,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741985_1161 (size=131360) 2024-12-05T01:00:43,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741986_1162 (size=111793) 2024-12-05T01:00:43,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741986_1162 (size=111793) 2024-12-05T01:00:43,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741986_1162 (size=111793) 2024-12-05T01:00:43,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741987_1163 (size=1832290) 2024-12-05T01:00:43,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741987_1163 (size=1832290) 2024-12-05T01:00:43,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741987_1163 (size=1832290) 2024-12-05T01:00:43,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741988_1164 (size=8360282) 2024-12-05T01:00:43,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741988_1164 (size=8360282) 2024-12-05T01:00:43,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741988_1164 (size=8360282) 2024-12-05T01:00:43,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741989_1165 (size=503880) 2024-12-05T01:00:43,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741989_1165 (size=503880) 2024-12-05T01:00:43,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741989_1165 (size=503880) 2024-12-05T01:00:43,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741990_1166 (size=322274) 2024-12-05T01:00:43,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741990_1166 (size=322274) 2024-12-05T01:00:43,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741990_1166 (size=322274) 2024-12-05T01:00:43,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741991_1167 (size=20406) 2024-12-05T01:00:43,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741991_1167 (size=20406) 2024-12-05T01:00:43,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741991_1167 
(size=20406) 2024-12-05T01:00:43,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741992_1168 (size=45609) 2024-12-05T01:00:43,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741992_1168 (size=45609) 2024-12-05T01:00:43,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741992_1168 (size=45609) 2024-12-05T01:00:43,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741993_1169 (size=136454) 2024-12-05T01:00:43,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741993_1169 (size=136454) 2024-12-05T01:00:43,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741993_1169 (size=136454) 2024-12-05T01:00:43,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741994_1170 (size=1597136) 2024-12-05T01:00:43,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741994_1170 (size=1597136) 2024-12-05T01:00:43,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741994_1170 (size=1597136) 2024-12-05T01:00:43,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741995_1171 (size=30873) 2024-12-05T01:00:43,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741995_1171 (size=30873) 2024-12-05T01:00:43,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741995_1171 (size=30873) 2024-12-05T01:00:43,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741996_1172 (size=29229) 2024-12-05T01:00:43,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741996_1172 (size=29229) 2024-12-05T01:00:43,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741996_1172 (size=29229) 2024-12-05T01:00:43,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741997_1173 (size=903858) 2024-12-05T01:00:43,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741997_1173 (size=903858) 2024-12-05T01:00:43,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741997_1173 (size=903858) 2024-12-05T01:00:43,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741998_1174 (size=443171) 2024-12-05T01:00:43,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741998_1174 
(size=443171) 2024-12-05T01:00:43,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741998_1174 (size=443171) 2024-12-05T01:00:43,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741999_1175 (size=5175431) 2024-12-05T01:00:43,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741999_1175 (size=5175431) 2024-12-05T01:00:43,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741999_1175 (size=5175431) 2024-12-05T01:00:43,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742000_1176 (size=232881) 2024-12-05T01:00:43,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742000_1176 (size=232881) 2024-12-05T01:00:43,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742000_1176 (size=232881) 2024-12-05T01:00:43,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742001_1177 (size=1323991) 2024-12-05T01:00:43,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742001_1177 (size=1323991) 2024-12-05T01:00:43,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742001_1177 (size=1323991) 2024-12-05T01:00:43,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742002_1178 (size=4695811) 2024-12-05T01:00:43,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742002_1178 (size=4695811) 2024-12-05T01:00:43,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742002_1178 (size=4695811) 2024-12-05T01:00:43,522 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:00:43,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742003_1179 (size=1877034) 2024-12-05T01:00:43,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742003_1179 (size=1877034) 2024-12-05T01:00:43,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742003_1179 (size=1877034) 2024-12-05T01:00:43,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742004_1180 (size=6424744) 2024-12-05T01:00:43,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742004_1180 (size=6424744) 2024-12-05T01:00:43,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added 
to blk_1073742004_1180 (size=6424744) 2024-12-05T01:00:43,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742005_1181 (size=217555) 2024-12-05T01:00:43,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742005_1181 (size=217555) 2024-12-05T01:00:43,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742005_1181 (size=217555) 2024-12-05T01:00:43,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742006_1182 (size=4188619) 2024-12-05T01:00:43,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742006_1182 (size=4188619) 2024-12-05T01:00:43,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742006_1182 (size=4188619) 2024-12-05T01:00:43,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742007_1183 (size=127628) 2024-12-05T01:00:43,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742007_1183 (size=127628) 2024-12-05T01:00:43,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742007_1183 (size=127628) 2024-12-05T01:00:43,602 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
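The JobResourceUploader warning just above ("No job jar file set. User classes may not be found.") is Hadoop's standard hint that the submitted Job has no jar attached. In ordinary user code it is avoided by pointing the Job at a class that lives inside the job jar, and HBase's TableMapReduceUtil can add the remaining dependency jars, which is what produces the "For class X, using jar Y" lines earlier. A minimal, hypothetical sketch (class and job names are illustrative, not the test's code):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class JobJarExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "export-example");   // hypothetical job name
    // Either of these silences the "No job jar file set" warning:
    job.setJarByClass(JobJarExample.class);               // infer the jar from a class
    // job.setJar("/path/to/job.jar");                    // or set the jar path directly

    // Resolve and ship the jars containing the job's key/value/format classes
    // (this is the call that logs "For class X, using jar Y").
    TableMapReduceUtil.addDependencyJars(job);
  }
}
```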
2024-12-05T01:00:43,604 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-05T01:00:43,606 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-05T01:00:43,606 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-05T01:00:43,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742008_1184 (size=427) 2024-12-05T01:00:43,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742008_1184 (size=427) 2024-12-05T01:00:43,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742008_1184 (size=427) 2024-12-05T01:00:43,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742009_1185 (size=21) 2024-12-05T01:00:43,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742009_1185 (size=21) 2024-12-05T01:00:43,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742009_1185 (size=21) 2024-12-05T01:00:43,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742010_1186 (size=304077) 2024-12-05T01:00:43,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742010_1186 (size=304077) 2024-12-05T01:00:43,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742010_1186 (size=304077) 2024-12-05T01:00:43,676 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T01:00:43,676 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T01:00:44,187 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0003_000001 (auth:SIMPLE) from 127.0.0.1:35346 2024-12-05T01:00:48,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-05T01:00:48,304 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-05T01:00:48,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-05T01:00:48,305 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-05T01:00:49,118 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0003_000001 (auth:SIMPLE) from 127.0.0.1:55638 2024-12-05T01:00:49,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742011_1187 (size=349775) 2024-12-05T01:00:49,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742011_1187 (size=349775) 2024-12-05T01:00:49,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742011_1187 (size=349775) 2024-12-05T01:00:51,395 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0003_000001 (auth:SIMPLE) from 127.0.0.1:39808 2024-12-05T01:00:51,396 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0003_000001 (auth:SIMPLE) from 127.0.0.1:45014 2024-12-05T01:00:55,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742012_1188 (size=8256) 2024-12-05T01:00:55,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742012_1188 (size=8256) 2024-12-05T01:00:55,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742012_1188 (size=8256) 2024-12-05T01:00:56,134 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0003/container_1733360294895_0003_01_000002/launch_container.sh] 2024-12-05T01:00:56,134 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0003/container_1733360294895_0003_01_000002/container_tokens] 2024-12-05T01:00:56,134 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0003/container_1733360294895_0003_01_000002/sysfs] 2024-12-05T01:00:56,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742014_1190 (size=5354) 2024-12-05T01:00:56,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742014_1190 (size=5354) 2024-12-05T01:00:56,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742014_1190 (size=5354) 2024-12-05T01:00:56,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742013_1189 (size=22124) 2024-12-05T01:00:56,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742013_1189 (size=22124) 2024-12-05T01:00:56,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742013_1189 (size=22124) 2024-12-05T01:00:56,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742015_1191 (size=462) 2024-12-05T01:00:56,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742015_1191 (size=462) 2024-12-05T01:00:56,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742015_1191 (size=462) 2024-12-05T01:00:56,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742016_1192 (size=22124) 2024-12-05T01:00:56,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742016_1192 (size=22124) 2024-12-05T01:00:56,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742016_1192 (size=22124) 2024-12-05T01:00:56,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742017_1193 (size=349775) 2024-12-05T01:00:56,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742017_1193 (size=349775) 2024-12-05T01:00:56,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742017_1193 (size=349775) 2024-12-05T01:00:56,903 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_2/usercache/jenkins/appcache/application_1733360294895_0003/container_1733360294895_0003_01_000003/launch_container.sh] 2024-12-05T01:00:56,904 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_2/usercache/jenkins/appcache/application_1733360294895_0003/container_1733360294895_0003_01_000003/container_tokens] 2024-12-05T01:00:56,904 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_2/usercache/jenkins/appcache/application_1733360294895_0003/container_1733360294895_0003_01_000003/sysfs] 2024-12-05T01:00:56,915 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0003_000001 (auth:SIMPLE) from 127.0.0.1:39812 2024-12-05T01:00:58,821 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T01:00:58,823 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-05T01:00:58,829 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl 2024-12-05T01:00:58,829 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T01:00:58,829 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T01:00:58,830 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-05T01:00:58,830 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-05T01:00:58,830 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-05T01:00:58,830 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360442051/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360442051/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-05T01:00:58,831 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360442051/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-05T01:00:58,831 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360442051/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-05T01:00:58,837 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-05T01:00:58,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-12-05T01:00:58,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-05T01:00:58,841 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360458840"}]},"ts":"1733360458840"} 2024-12-05T01:00:58,843 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-05T01:00:58,843 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-12-05T01:00:58,844 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83, ppid=82, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-05T01:00:58,846 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=34551bb942076cf001f64da2ea0637b8, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=d0e1ebb6cb75461009795078449663b3, UNASSIGN}] 2024-12-05T01:00:58,847 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=34551bb942076cf001f64da2ea0637b8, UNASSIGN 2024-12-05T01:00:58,847 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=d0e1ebb6cb75461009795078449663b3, UNASSIGN 2024-12-05T01:00:58,848 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=34551bb942076cf001f64da2ea0637b8, regionState=CLOSING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:00:58,848 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=d0e1ebb6cb75461009795078449663b3, regionState=CLOSING, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:00:58,850 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure 
table=testExportWithResetTtl, region=34551bb942076cf001f64da2ea0637b8, UNASSIGN because future has completed 2024-12-05T01:00:58,850 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:00:58,850 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 34551bb942076cf001f64da2ea0637b8, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:00:58,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=d0e1ebb6cb75461009795078449663b3, UNASSIGN because future has completed 2024-12-05T01:00:58,852 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:00:58,852 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure d0e1ebb6cb75461009795078449663b3, server=2113c16e5528,44435,1733360288855}] 2024-12-05T01:00:58,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-05T01:00:59,004 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): Close 34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:59,004 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:00:59,004 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing 34551bb942076cf001f64da2ea0637b8, disabling compactions & flushes 2024-12-05T01:00:59,004 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. 2024-12-05T01:00:59,005 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. 2024-12-05T01:00:59,005 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. after waiting 0 ms 2024-12-05T01:00:59,005 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. 
2024-12-05T01:00:59,005 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:59,005 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:00:59,005 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing d0e1ebb6cb75461009795078449663b3, disabling compactions & flushes 2024-12-05T01:00:59,005 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. 2024-12-05T01:00:59,005 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. 2024-12-05T01:00:59,005 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. after waiting 0 ms 2024-12-05T01:00:59,005 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. 2024-12-05T01:00:59,017 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T01:00:59,018 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:00:59,018 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8. 
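The DisableTableProcedure and region-close activity above, and the DeleteTableProcedure that follows, are the master-side half of an ordinary client request ("Client=jenkins//172.17.0.2 disable testExportWithResetTtl"). A minimal client-side sketch of the calls that trigger this sequence, assuming a reachable cluster configuration on the classpath (not the test's own code):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testExportWithResetTtl");
      admin.disableTable(tn);   // master runs DisableTableProcedure and closes the regions
      admin.deleteTable(tn);    // master runs DeleteTableProcedure and archives the region files
    }
  }
}
```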
2024-12-05T01:00:59,018 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for 34551bb942076cf001f64da2ea0637b8: Waiting for close lock at 1733360459004Running coprocessor pre-close hooks at 1733360459004Disabling compacts and flushes for region at 1733360459004Disabling writes for close at 1733360459005 (+1 ms)Writing region close event to WAL at 1733360459007 (+2 ms)Running coprocessor post-close hooks at 1733360459018 (+11 ms)Closed at 1733360459018 2024-12-05T01:00:59,020 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed 34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:59,021 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=34551bb942076cf001f64da2ea0637b8, regionState=CLOSED 2024-12-05T01:00:59,022 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T01:00:59,023 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:00:59,024 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3. 2024-12-05T01:00:59,024 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for d0e1ebb6cb75461009795078449663b3: Waiting for close lock at 1733360459005Running coprocessor pre-close hooks at 1733360459005Disabling compacts and flushes for region at 1733360459005Disabling writes for close at 1733360459005Writing region close event to WAL at 1733360459013 (+8 ms)Running coprocessor post-close hooks at 1733360459023 (+10 ms)Closed at 1733360459023 2024-12-05T01:00:59,027 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:59,028 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 34551bb942076cf001f64da2ea0637b8, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:00:59,028 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=d0e1ebb6cb75461009795078449663b3, regionState=CLOSED 2024-12-05T01:00:59,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure d0e1ebb6cb75461009795078449663b3, server=2113c16e5528,44435,1733360288855 because future has completed 2024-12-05T01:00:59,032 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=84 2024-12-05T01:00:59,032 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure 
34551bb942076cf001f64da2ea0637b8, server=2113c16e5528,32927,1733360288712 in 180 msec 2024-12-05T01:00:59,034 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=85 2024-12-05T01:00:59,034 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=34551bb942076cf001f64da2ea0637b8, UNASSIGN in 186 msec 2024-12-05T01:00:59,034 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure d0e1ebb6cb75461009795078449663b3, server=2113c16e5528,44435,1733360288855 in 180 msec 2024-12-05T01:00:59,035 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=85, resume processing ppid=83 2024-12-05T01:00:59,036 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=d0e1ebb6cb75461009795078449663b3, UNASSIGN in 188 msec 2024-12-05T01:00:59,039 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-12-05T01:00:59,039 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 192 msec 2024-12-05T01:00:59,042 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360459042"}]},"ts":"1733360459042"} 2024-12-05T01:00:59,045 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-05T01:00:59,045 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-05T01:00:59,048 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 209 msec 2024-12-05T01:00:59,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-05T01:00:59,163 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-12-05T01:00:59,164 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-05T01:00:59,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-12-05T01:00:59,167 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-05T01:00:59,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-12-05T01:00:59,170 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; 
DeleteTableProcedure table=testExportWithResetTtl 2024-12-05T01:00:59,174 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-05T01:00:59,179 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:59,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-05T01:00:59,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-05T01:00:59,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-05T01:00:59,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-05T01:00:59,181 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-05T01:00:59,181 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-05T01:00:59,182 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-05T01:00:59,182 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-05T01:00:59,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-05T01:00:59,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:59,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-05T01:00:59,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:59,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, 
state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-05T01:00:59,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-05T01:00:59,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:59,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:59,185 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8/recovered.edits] 2024-12-05T01:00:59,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-05T01:00:59,186 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:59,186 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:59,187 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:59,187 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T01:00:59,192 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8/cf/b9c86a99e4df401ea94123d80539bd29 to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8/cf/b9c86a99e4df401ea94123d80539bd29 2024-12-05T01:00:59,192 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:59,197 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8/recovered.edits/8.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8/recovered.edits/8.seqid 2024-12-05T01:00:59,198 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3/recovered.edits] 2024-12-05T01:00:59,198 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/34551bb942076cf001f64da2ea0637b8 2024-12-05T01:00:59,207 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3/cf/c1cd992170a848d2816dd36c73b0591e to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3/cf/c1cd992170a848d2816dd36c73b0591e 2024-12-05T01:00:59,213 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3/recovered.edits/8.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3/recovered.edits/8.seqid 2024-12-05T01:00:59,215 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportWithResetTtl/d0e1ebb6cb75461009795078449663b3 2024-12-05T01:00:59,215 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-05T01:00:59,221 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-05T01:00:59,228 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-05T01:00:59,233 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-12-05T01:00:59,235 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-05T01:00:59,235 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 
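On table delete, HFileArchiver moves the table's store files and recovered.edits under the cluster's archive directory instead of deleting them outright, as the "Archived from FileablePath ... to .../archive/..." lines show. A small, hypothetical check of what landed in the archive using the plain Hadoop FileSystem API; the NameNode URI and test-data root below are placeholders (this run's real values appear in the paths above):

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListArchivedFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37149"), conf);  // placeholder NameNode
    Path archived = new Path(
        "/user/jenkins/test-data/<test-data-root>/archive/data/default/testExportWithResetTtl");
    // Walk the archive recursively and print each archived file with its size.
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(archived, true);
    while (it.hasNext()) {
      LocatedFileStatus f = it.next();
      System.out.println(f.getPath() + " (" + f.getLen() + " bytes)");
    }
  }
}
```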
2024-12-05T01:00:59,235 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360459235"}]},"ts":"9223372036854775807"} 2024-12-05T01:00:59,235 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360459235"}]},"ts":"9223372036854775807"} 2024-12-05T01:00:59,240 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T01:00:59,240 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 34551bb942076cf001f64da2ea0637b8, NAME => 'testExportWithResetTtl,,1733360441013.34551bb942076cf001f64da2ea0637b8.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => d0e1ebb6cb75461009795078449663b3, NAME => 'testExportWithResetTtl,1,1733360441013.d0e1ebb6cb75461009795078449663b3.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T01:00:59,240 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 2024-12-05T01:00:59,241 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733360459240"}]},"ts":"9223372036854775807"} 2024-12-05T01:00:59,256 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-12-05T01:00:59,257 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-05T01:00:59,272 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 96 msec 2024-12-05T01:00:59,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-05T01:00:59,293 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-12-05T01:00:59,293 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-12-05T01:00:59,295 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-05T01:00:59,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-05T01:00:59,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-05T01:00:59,300 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360459300"}]},"ts":"1733360459300"} 2024-12-05T01:00:59,303 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 
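The recurring "Checking to see if procedure is done pid=..." lines are the master answering the client's completion polling for these procedures. With the Admin API that polling sits behind a Future; a hedged sketch, assuming an HBase 2.x or later client (again, not the test's code):

```java
import java.util.concurrent.Future;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncDisableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits the DisableTableProcedure and returns immediately; the client then
      // polls the master for completion (the "Checking to see if procedure is done"
      // lines) until the returned Future resolves.
      Future<Void> f = admin.disableTableAsync(TableName.valueOf("testtb-testExportWithResetTtl"));
      f.get();  // block until the procedure finishes
    }
  }
}
```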
2024-12-05T01:00:59,303 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-05T01:00:59,305 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-05T01:00:59,307 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7d09f955f3f0039510e79bccb1720562, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=99e1c29b53be5af6eac7ef1de4e1fe65, UNASSIGN}] 2024-12-05T01:00:59,308 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=99e1c29b53be5af6eac7ef1de4e1fe65, UNASSIGN 2024-12-05T01:00:59,308 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7d09f955f3f0039510e79bccb1720562, UNASSIGN 2024-12-05T01:00:59,309 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=99e1c29b53be5af6eac7ef1de4e1fe65, regionState=CLOSING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:00:59,311 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=7d09f955f3f0039510e79bccb1720562, regionState=CLOSING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:00:59,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=99e1c29b53be5af6eac7ef1de4e1fe65, UNASSIGN because future has completed 2024-12-05T01:00:59,313 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:00:59,313 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 99e1c29b53be5af6eac7ef1de4e1fe65, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:00:59,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7d09f955f3f0039510e79bccb1720562, UNASSIGN because future has completed 2024-12-05T01:00:59,315 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:00:59,315 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7d09f955f3f0039510e79bccb1720562, server=2113c16e5528,41191,1733360288910}] 2024-12-05T01:00:59,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-05T01:00:59,467 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close 99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:59,467 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:00:59,468 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1722): Closing 99e1c29b53be5af6eac7ef1de4e1fe65, disabling compactions & flushes 2024-12-05T01:00:59,468 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. 2024-12-05T01:00:59,468 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. 2024-12-05T01:00:59,468 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. after waiting 0 ms 2024-12-05T01:00:59,468 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. 2024-12-05T01:00:59,468 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close 7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:59,468 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:00:59,469 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing 7d09f955f3f0039510e79bccb1720562, disabling compactions & flushes 2024-12-05T01:00:59,469 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. 2024-12-05T01:00:59,469 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. 2024-12-05T01:00:59,469 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. after waiting 0 ms 2024-12-05T01:00:59,469 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. 
2024-12-05T01:00:59,515 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:00:59,517 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:00:59,517 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562. 2024-12-05T01:00:59,517 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for 7d09f955f3f0039510e79bccb1720562: Waiting for close lock at 1733360459468Running coprocessor pre-close hooks at 1733360459468Disabling compacts and flushes for region at 1733360459469 (+1 ms)Disabling writes for close at 1733360459469Writing region close event to WAL at 1733360459473 (+4 ms)Running coprocessor post-close hooks at 1733360459517 (+44 ms)Closed at 1733360459517 2024-12-05T01:00:59,519 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed 7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:59,524 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=7d09f955f3f0039510e79bccb1720562, regionState=CLOSED 2024-12-05T01:00:59,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7d09f955f3f0039510e79bccb1720562, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T01:00:59,532 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=91 2024-12-05T01:00:59,533 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure 7d09f955f3f0039510e79bccb1720562, server=2113c16e5528,41191,1733360288910 in 213 msec 2024-12-05T01:00:59,533 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7d09f955f3f0039510e79bccb1720562, UNASSIGN in 224 msec 2024-12-05T01:00:59,537 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:00:59,538 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:00:59,538 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65. 
2024-12-05T01:00:59,538 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for 99e1c29b53be5af6eac7ef1de4e1fe65: Waiting for close lock at 1733360459468Running coprocessor pre-close hooks at 1733360459468Disabling compacts and flushes for region at 1733360459468Disabling writes for close at 1733360459468Writing region close event to WAL at 1733360459512 (+44 ms)Running coprocessor post-close hooks at 1733360459538 (+26 ms)Closed at 1733360459538 2024-12-05T01:00:59,541 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed 99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:59,542 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=99e1c29b53be5af6eac7ef1de4e1fe65, regionState=CLOSED 2024-12-05T01:00:59,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 99e1c29b53be5af6eac7ef1de4e1fe65, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:00:59,551 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=92 2024-12-05T01:00:59,551 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure 99e1c29b53be5af6eac7ef1de4e1fe65, server=2113c16e5528,32927,1733360288712 in 235 msec 2024-12-05T01:00:59,555 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=92, resume processing ppid=90 2024-12-05T01:00:59,555 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=99e1c29b53be5af6eac7ef1de4e1fe65, UNASSIGN in 244 msec 2024-12-05T01:00:59,558 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-12-05T01:00:59,558 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 250 msec 2024-12-05T01:00:59,560 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360459559"}]},"ts":"1733360459559"} 2024-12-05T01:00:59,562 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-05T01:00:59,562 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-05T01:00:59,564 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 267 msec 2024-12-05T01:00:59,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-05T01:00:59,623 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-05T01:00:59,629 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] 
master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-05T01:00:59,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-05T01:00:59,635 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-05T01:00:59,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-05T01:00:59,637 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-05T01:00:59,642 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-05T01:00:59,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-05T01:00:59,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-05T01:00:59,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-05T01:00:59,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-05T01:00:59,649 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-05T01:00:59,649 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-05T01:00:59,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:59,650 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-12-05T01:00:59,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-05T01:00:59,650 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is 
empty 2024-12-05T01:00:59,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:59,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-05T01:00:59,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:59,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:00:59,651 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-12-05T01:00:59,651 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-05T01:00:59,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-05T01:00:59,658 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:59,659 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:59,665 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562/recovered.edits] 2024-12-05T01:00:59,666 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65/recovered.edits] 2024-12-05T01:00:59,674 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65/cf/a1ec98bd04934215a7cb5ec1e90358ee to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65/cf/a1ec98bd04934215a7cb5ec1e90358ee 2024-12-05T01:00:59,675 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived 
from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562/cf/fca01a6158c749d8badf29259a04de6f to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562/cf/fca01a6158c749d8badf29259a04de6f 2024-12-05T01:00:59,693 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65/recovered.edits/9.seqid 2024-12-05T01:00:59,694 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/99e1c29b53be5af6eac7ef1de4e1fe65 2024-12-05T01:00:59,694 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562/recovered.edits/9.seqid 2024-12-05T01:00:59,695 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithResetTtl/7d09f955f3f0039510e79bccb1720562 2024-12-05T01:00:59,695 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-05T01:00:59,698 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-05T01:00:59,703 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-05T01:00:59,706 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-12-05T01:00:59,711 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-05T01:00:59,711 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 
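The DeleteTableProcedure steps above (permissions removed from hbase:acl, region directories moved aside by the HFileArchiver, region rows and the table descriptor removed from hbase:meta and from the assignment state), together with the snapshot deletions recorded a few lines below ("delete name: ... type: DISABLED"), correspond to a handful of Admin calls. As a hedged sketch only, assuming the standard Admin API and using the table and snapshot names that appear in the log (the class name is illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableAndSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // deleteTable() requires a disabled table; it drives the DeleteTableProcedure
      // seen above (archive the region dirs, delete the meta rows, drop the
      // descriptor and region states).
      if (admin.tableExists(table) && admin.isTableDisabled(table)) {
        admin.deleteTable(table);
      }
      // The master log just below also records the test snapshots being removed.
      admin.deleteSnapshot("emptySnaptb0-testExportWithResetTtl");
      admin.deleteSnapshot("snaptb-testExportWithResetTtl");
      admin.deleteSnapshot("snaptb0-testExportWithResetTtl");
    }
  }
}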
2024-12-05T01:00:59,712 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360459711"}]},"ts":"9223372036854775807"} 2024-12-05T01:00:59,712 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360459711"}]},"ts":"9223372036854775807"} 2024-12-05T01:00:59,715 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T01:00:59,715 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 7d09f955f3f0039510e79bccb1720562, NAME => 'testtb-testExportWithResetTtl,,1733360438356.7d09f955f3f0039510e79bccb1720562.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 99e1c29b53be5af6eac7ef1de4e1fe65, NAME => 'testtb-testExportWithResetTtl,1,1733360438356.99e1c29b53be5af6eac7ef1de4e1fe65.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T01:00:59,715 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-12-05T01:00:59,715 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733360459715"}]},"ts":"9223372036854775807"} 2024-12-05T01:00:59,718 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-12-05T01:00:59,719 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-05T01:00:59,722 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 90 msec 2024-12-05T01:00:59,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-05T01:00:59,762 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-12-05T01:00:59,762 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-05T01:00:59,773 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-12-05T01:00:59,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-05T01:00:59,782 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-12-05T01:00:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-05T01:00:59,787 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: 
"snaptb0-testExportWithResetTtl" type: DISABLED 2024-12-05T01:00:59,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-05T01:00:59,820 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=789 (was 787) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:34770 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:53336 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71582475_1 at /127.0.0.1:45570 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:44163 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44163 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 19617) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:45590 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:41563 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2848 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=806 (was 801) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=616 (was 583) - SystemLoadAverage LEAK? -, ProcessCount=28 (was 29), AvailableMemoryMB=5754 (was 5526) - AvailableMemoryMB LEAK? - 2024-12-05T01:00:59,821 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=789 is superior to 500 2024-12-05T01:00:59,858 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=789, OpenFileDescriptor=806, MaxFileDescriptor=1048576, SystemLoadAverage=616, ProcessCount=29, AvailableMemoryMB=5737 2024-12-05T01:00:59,859 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=789 is superior to 500 2024-12-05T01:00:59,861 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T01:00:59,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-05T01:00:59,864 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T01:00:59,864 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:00:59,864 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-12-05T01:00:59,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-05T01:00:59,867 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T01:00:59,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742018_1194 (size=407) 2024-12-05T01:00:59,916 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742018_1194 (size=407) 2024-12-05T01:00:59,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742018_1194 (size=407) 2024-12-05T01:00:59,919 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9a1f6e5618d93f9950f5d9b8364adc17, NAME => 'testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:00:59,926 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 57c59d9748bb6b0724486c75422f98c3, NAME => 'testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:00:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-05T01:01:00,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742019_1195 (size=68) 2024-12-05T01:01:00,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742019_1195 (size=68) 2024-12-05T01:01:00,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742019_1195 (size=68) 2024-12-05T01:01:00,012 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:01:00,012 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 9a1f6e5618d93f9950f5d9b8364adc17, disabling compactions & flushes 2024-12-05T01:01:00,012 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. 
2024-12-05T01:01:00,012 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. 2024-12-05T01:01:00,012 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. after waiting 0 ms 2024-12-05T01:01:00,013 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. 2024-12-05T01:01:00,013 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. 2024-12-05T01:01:00,013 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9a1f6e5618d93f9950f5d9b8364adc17: Waiting for close lock at 1733360460012Disabling compacts and flushes for region at 1733360460012Disabling writes for close at 1733360460012Writing region close event to WAL at 1733360460013 (+1 ms)Closed at 1733360460013 2024-12-05T01:01:00,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742020_1196 (size=68) 2024-12-05T01:01:00,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742020_1196 (size=68) 2024-12-05T01:01:00,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742020_1196 (size=68) 2024-12-05T01:01:00,042 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:01:00,042 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 57c59d9748bb6b0724486c75422f98c3, disabling compactions & flushes 2024-12-05T01:01:00,042 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. 2024-12-05T01:01:00,042 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. 2024-12-05T01:01:00,042 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. after waiting 0 ms 2024-12-05T01:01:00,042 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. 2024-12-05T01:01:00,042 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. 
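At this point the CreateTableProcedure (pid=96) has written the filesystem layout for testtb-testExportFileSystemState: two regions split at row key '1' (STARTKEY ''..'1' and '1'..''), each with a single family 'cf' keeping one version and otherwise default settings, as the descriptor echoed in the log shows. A minimal sketch of the client call that produces such a layout, assuming the modern TableDescriptorBuilder API (the test's real code is not shown here, and any attribute not visible in the log is simply left at its default):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
        // Single family 'cf' with VERSIONS => '1', matching the descriptor in the log.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)
            .build())
        .build();
    // One split key '1' yields exactly the two regions created above:
    // (''..'1') and ('1'..'').
    byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(desc, splitKeys);
    }
  }
}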
2024-12-05T01:01:00,042 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 57c59d9748bb6b0724486c75422f98c3: Waiting for close lock at 1733360460042Disabling compacts and flushes for region at 1733360460042Disabling writes for close at 1733360460042Writing region close event to WAL at 1733360460042Closed at 1733360460042 2024-12-05T01:01:00,045 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T01:01:00,045 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733360460045"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360460045"}]},"ts":"1733360460045"} 2024-12-05T01:01:00,046 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733360460045"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360460045"}]},"ts":"1733360460045"} 2024-12-05T01:01:00,050 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T01:01:00,051 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T01:01:00,051 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360460051"}]},"ts":"1733360460051"} 2024-12-05T01:01:00,059 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-05T01:01:00,060 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {2113c16e5528=0} racks are {/default-rack=0} 2024-12-05T01:01:00,062 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T01:01:00,062 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T01:01:00,062 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T01:01:00,062 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T01:01:00,062 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T01:01:00,062 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T01:01:00,062 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T01:01:00,062 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T01:01:00,062 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T01:01:00,062 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T01:01:00,063 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9a1f6e5618d93f9950f5d9b8364adc17, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=57c59d9748bb6b0724486c75422f98c3, ASSIGN}] 2024-12-05T01:01:00,064 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9a1f6e5618d93f9950f5d9b8364adc17, ASSIGN 2024-12-05T01:01:00,065 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9a1f6e5618d93f9950f5d9b8364adc17, ASSIGN; state=OFFLINE, location=2113c16e5528,44435,1733360288855; forceNewPlan=false, retain=false 2024-12-05T01:01:00,066 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=57c59d9748bb6b0724486c75422f98c3, ASSIGN 2024-12-05T01:01:00,067 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=57c59d9748bb6b0724486c75422f98c3, ASSIGN; state=OFFLINE, location=2113c16e5528,41191,1733360288910; forceNewPlan=false, retain=false 2024-12-05T01:01:00,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-05T01:01:00,216 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
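The lines above show the balancer choosing target servers and the master spawning TransitRegionStateProcedure ASSIGN subprocedures, while the client keeps asking "is procedure done pid=96". A purely illustrative helper for the waiting side, assuming the Admin API (isTableAvailable is a real Admin method; the polling loop itself is an assumption, since the synchronous createTable already blocks until the regions are open):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class WaitForTableSketch {
  // Poll until the master reports every region of the table as open. An explicit
  // loop like this is only needed when the table was created through the async API.
  static void waitUntilAvailable(Admin admin, TableName table) throws Exception {
    while (!admin.isTableAvailable(table)) {
      Thread.sleep(100);  // mirrors the client's periodic "procedure done?" checks
    }
  }
}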
2024-12-05T01:01:00,217 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=57c59d9748bb6b0724486c75422f98c3, regionState=OPENING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:01:00,217 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=9a1f6e5618d93f9950f5d9b8364adc17, regionState=OPENING, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:01:00,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=57c59d9748bb6b0724486c75422f98c3, ASSIGN because future has completed 2024-12-05T01:01:00,226 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure 57c59d9748bb6b0724486c75422f98c3, server=2113c16e5528,41191,1733360288910}] 2024-12-05T01:01:00,227 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9a1f6e5618d93f9950f5d9b8364adc17, ASSIGN because future has completed 2024-12-05T01:01:00,228 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9a1f6e5618d93f9950f5d9b8364adc17, server=2113c16e5528,44435,1733360288855}] 2024-12-05T01:01:00,384 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. 2024-12-05T01:01:00,384 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => 57c59d9748bb6b0724486c75422f98c3, NAME => 'testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T01:01:00,385 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. service=AccessControlService 2024-12-05T01:01:00,385 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T01:01:00,385 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:00,385 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:01:00,385 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. 2024-12-05T01:01:00,385 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7794): checking encryption for 57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:00,385 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for 57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:00,385 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => 9a1f6e5618d93f9950f5d9b8364adc17, NAME => 'testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T01:01:00,386 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. service=AccessControlService 2024-12-05T01:01:00,386 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T01:01:00,386 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:00,386 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:01:00,387 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for 9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:00,387 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for 9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:00,400 INFO [StoreOpener-57c59d9748bb6b0724486c75422f98c3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:00,403 INFO [StoreOpener-57c59d9748bb6b0724486c75422f98c3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 57c59d9748bb6b0724486c75422f98c3 columnFamilyName cf 2024-12-05T01:01:00,403 DEBUG [StoreOpener-57c59d9748bb6b0724486c75422f98c3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:01:00,404 INFO [StoreOpener-57c59d9748bb6b0724486c75422f98c3-1 {}] regionserver.HStore(327): Store=57c59d9748bb6b0724486c75422f98c3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:01:00,404 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for 57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:00,405 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:00,406 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:00,410 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for 57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:00,410 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for 57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:00,413 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for 57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:00,417 INFO [StoreOpener-9a1f6e5618d93f9950f5d9b8364adc17-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:00,418 INFO [StoreOpener-9a1f6e5618d93f9950f5d9b8364adc17-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9a1f6e5618d93f9950f5d9b8364adc17 columnFamilyName cf 2024-12-05T01:01:00,418 DEBUG [StoreOpener-9a1f6e5618d93f9950f5d9b8364adc17-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:01:00,419 INFO [StoreOpener-9a1f6e5618d93f9950f5d9b8364adc17-1 {}] regionserver.HStore(327): Store=9a1f6e5618d93f9950f5d9b8364adc17/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:01:00,419 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for 9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:00,421 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:00,422 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:00,422 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for 9a1f6e5618d93f9950f5d9b8364adc17 
2024-12-05T01:01:00,422 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for 9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:00,425 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1093): writing seq id for 9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:00,441 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:01:00,442 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened 57c59d9748bb6b0724486c75422f98c3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65358772, jitterRate=-0.026078402996063232}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:01:00,442 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:00,443 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for 57c59d9748bb6b0724486c75422f98c3: Running coprocessor pre-open hook at 1733360460386Writing region info on filesystem at 1733360460386Initializing all the Stores at 1733360460386Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360460386Cleaning up temporary data from old regions at 1733360460410 (+24 ms)Running coprocessor post-open hooks at 1733360460442 (+32 ms)Region opened successfully at 1733360460443 (+1 ms) 2024-12-05T01:01:00,444 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3., pid=99, masterSystemTime=1733360460379 2024-12-05T01:01:00,446 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:01:00,447 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. 2024-12-05T01:01:00,447 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. 
2024-12-05T01:01:00,447 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened 9a1f6e5618d93f9950f5d9b8364adc17; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61514676, jitterRate=-0.08335989713668823}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:01:00,447 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:00,447 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for 9a1f6e5618d93f9950f5d9b8364adc17: Running coprocessor pre-open hook at 1733360460387Writing region info on filesystem at 1733360460387Initializing all the Stores at 1733360460388 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360460388Cleaning up temporary data from old regions at 1733360460422 (+34 ms)Running coprocessor post-open hooks at 1733360460447 (+25 ms)Region opened successfully at 1733360460447 2024-12-05T01:01:00,447 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=57c59d9748bb6b0724486c75422f98c3, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:01:00,448 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17., pid=100, masterSystemTime=1733360460381 2024-12-05T01:01:00,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure 57c59d9748bb6b0724486c75422f98c3, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T01:01:00,451 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. 2024-12-05T01:01:00,451 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. 
2024-12-05T01:01:00,451 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=9a1f6e5618d93f9950f5d9b8364adc17, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:01:00,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9a1f6e5618d93f9950f5d9b8364adc17, server=2113c16e5528,44435,1733360288855 because future has completed 2024-12-05T01:01:00,457 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=98 2024-12-05T01:01:00,457 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure 57c59d9748bb6b0724486c75422f98c3, server=2113c16e5528,41191,1733360288910 in 226 msec 2024-12-05T01:01:00,460 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=97 2024-12-05T01:01:00,460 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure 9a1f6e5618d93f9950f5d9b8364adc17, server=2113c16e5528,44435,1733360288855 in 228 msec 2024-12-05T01:01:00,462 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=57c59d9748bb6b0724486c75422f98c3, ASSIGN in 395 msec 2024-12-05T01:01:00,464 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=96 2024-12-05T01:01:00,464 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9a1f6e5618d93f9950f5d9b8364adc17, ASSIGN in 398 msec 2024-12-05T01:01:00,465 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T01:01:00,465 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360460465"}]},"ts":"1733360460465"} 2024-12-05T01:01:00,468 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-05T01:01:00,469 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T01:01:00,470 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-05T01:01:00,474 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-05T01:01:00,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:01:00,476 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:01:00,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:01:00,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:01:00,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T01:01:00,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T01:01:00,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T01:01:00,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T01:01:00,481 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-05T01:01:00,481 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-05T01:01:00,482 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-05T01:01:00,482 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-05T01:01:00,486 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 621 msec 2024-12-05T01:01:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-05T01:01:00,492 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: 
CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-05T01:01:00,492 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-12-05T01:01:00,492 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:01:00,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-12-05T01:01:00,498 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:01:00,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemState assigned. 2024-12-05T01:01:00,498 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T01:01:00,502 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-05T01:01:00,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360460502 (current time:1733360460502). 2024-12-05T01:01:00,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:01:00,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-05T01:01:00,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:01:00,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b98d0ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:00,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:01:00,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:01:00,510 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:01:00,510 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:01:00,510 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:01:00,511 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@601283a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:00,511 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:01:00,511 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:01:00,511 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:00,512 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50256, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:01:00,513 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b2dcec1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:00,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:01:00,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:01:00,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:01:00,516 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59772, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:01:00,517 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 
2024-12-05T01:01:00,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:01:00,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:00,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:00,518 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:01:00,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73234dc0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:00,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:01:00,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:01:00,519 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:01:00,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:01:00,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:01:00,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75030fd8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:00,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:01:00,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:01:00,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:00,521 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50284, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:01:00,522 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5395f719, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:00,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:01:00,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:01:00,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:01:00,525 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59776, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:01:00,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:01:00,529 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 
2024-12-05T01:01:00,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:01:00,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:00,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:00,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-05T01:01:00,530 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:01:00,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-05T01:01:00,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-05T01:01:00,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-05T01:01:00,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-05T01:01:00,544 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:01:00,553 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:01:00,559 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:01:00,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742021_1197 (size=170) 2024-12-05T01:01:00,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742021_1197 (size=170) 2024-12-05T01:01:00,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742021_1197 (size=170) 2024-12-05T01:01:00,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-05T01:01:00,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-05T01:01:01,014 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:01:01,015 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9a1f6e5618d93f9950f5d9b8364adc17}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 57c59d9748bb6b0724486c75422f98c3}] 2024-12-05T01:01:01,016 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:01,017 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:01,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41191 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-12-05T01:01:01,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44435 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-12-05T01:01:01,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. 2024-12-05T01:01:01,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for 9a1f6e5618d93f9950f5d9b8364adc17: 2024-12-05T01:01:01,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. for emptySnaptb0-testExportFileSystemState completed. 2024-12-05T01:01:01,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-05T01:01:01,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:01:01,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:01:01,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-05T01:01:01,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. 2024-12-05T01:01:01,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for 57c59d9748bb6b0724486c75422f98c3: 2024-12-05T01:01:01,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. for emptySnaptb0-testExportFileSystemState completed. 2024-12-05T01:01:01,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-05T01:01:01,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:01:01,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:01:01,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742023_1199 (size=71) 2024-12-05T01:01:01,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. 2024-12-05T01:01:01,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-05T01:01:01,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742023_1199 (size=71) 2024-12-05T01:01:01,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742022_1198 (size=71) 2024-12-05T01:01:01,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742023_1199 (size=71) 2024-12-05T01:01:01,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-12-05T01:01:01,316 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:01,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742022_1198 (size=71) 2024-12-05T01:01:01,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742022_1198 (size=71) 2024-12-05T01:01:01,316 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:01,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. 
2024-12-05T01:01:01,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-12-05T01:01:01,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-12-05T01:01:01,318 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:01,319 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:01,321 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 57c59d9748bb6b0724486c75422f98c3 in 303 msec 2024-12-05T01:01:01,324 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=102, resume processing ppid=101 2024-12-05T01:01:01,324 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9a1f6e5618d93f9950f5d9b8364adc17 in 306 msec 2024-12-05T01:01:01,324 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:01:01,325 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:01:01,327 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:01:01,327 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-05T01:01:01,328 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-05T01:01:01,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742024_1200 (size=552) 2024-12-05T01:01:01,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742024_1200 (size=552) 2024-12-05T01:01:01,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742024_1200 (size=552) 2024-12-05T01:01:01,450 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:01:01,471 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:01:01,472 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-05T01:01:01,478 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:01:01,478 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-05T01:01:01,480 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 944 msec 2024-12-05T01:01:01,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-05T01:01:01,683 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-05T01:01:01,690 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='0a80088cbb65ecdafa90744025c5426f9', locateType=CURRENT is [region=testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17., hostname=2113c16e5528,44435,1733360288855, seqNum=2] 2024-12-05T01:01:01,693 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='10f1bd0f7d5e1c7c3b09ceeee43ff780c', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:01:01,696 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='2550310dec4df49461471f088b10049d6', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:01:01,698 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 
'testtb-testExportFileSystemState', row='3957745169980be906bd3d403ee3dff45', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:01:01,699 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='51cca15273e2da392e4f010719ab91687', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:01:01,701 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='4349d54c154ddd89f212807c106521b03', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:01:01,702 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='6d2c2ab9b59bd0bb800db58dbdd8f1a04', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:01:01,705 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44435 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:01:01,708 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41191 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:01:01,713 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T01:01:01,717 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-05T01:01:01,717 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. 
2024-12-05T01:01:01,717 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:01:01,720 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T01:01:01,730 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T01:01:01,740 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T01:01:01,773 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-05T01:01:01,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360461773 (current time:1733360461773). 2024-12-05T01:01:01,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:01:01,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-05T01:01:01,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:01:01,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ba09772, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:01,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:01:01,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:01:01,806 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:01:01,806 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:01:01,806 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:01:01,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@202db95d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-12-05T01:01:01,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:01:01,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:01:01,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:01,809 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51596, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:01:01,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52dc8d68, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:01,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:01:01,811 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:01:01,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:01:01,813 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56212, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:01:01,815 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 
2024-12-05T01:01:01,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:01:01,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:01,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:01,815 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:01:01,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49cbefd1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:01,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:01:01,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:01:01,823 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:01:01,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:01:01,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:01:01,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46864b1a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:01,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:01:01,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:01:01,825 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:01,826 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51612, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:01:01,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79749db0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:01,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:01:01,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:01:01,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:01:01,831 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56218, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:01:01,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:01:01,835 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 
2024-12-05T01:01:01,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:01:01,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:01,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:01,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-05T01:01:01,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
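[Editor's note] The entries above trace the master side of a snapshot request: MasterRpcServices.snapshot receives { ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH }, SnapshotDescriptionUtils fills in the creation time, TTL, version and owner, and the table ACL (jenkins: RWXCA) is read from hbase:acl so it can be written into the snapshot description. A minimal client-side sketch that would produce such a request is shown below; only the snapshot and table names are taken from the log, the Configuration and connection setup are assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Assumes hbase-site.xml for the target cluster is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Synchronous call: returns only after the master-side SnapshotProcedure
      // (pid=104 in the log above) has completed.
      admin.snapshot("snaptb0-testExportFileSystemState",
          TableName.valueOf("testtb-testExportFileSystemState"),
          SnapshotType.FLUSH);
    }
  }
}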
2024-12-05T01:01:01,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-05T01:01:01,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-05T01:01:01,839 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:01:01,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-05T01:01:01,842 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:01:01,845 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:01:01,845 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:01:01,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-05T01:01:01,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742025_1201 (size=165) 2024-12-05T01:01:01,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742025_1201 (size=165) 2024-12-05T01:01:01,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742025_1201 (size=165) 2024-12-05T01:01:01,988 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:01:01,988 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9a1f6e5618d93f9950f5d9b8364adc17}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 57c59d9748bb6b0724486c75422f98c3}] 2024-12-05T01:01:01,989 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure 57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:01,990 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:02,142 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44435 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-05T01:01:02,143 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. 2024-12-05T01:01:02,143 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing 9a1f6e5618d93f9950f5d9b8364adc17 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-05T01:01:02,144 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41191 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-05T01:01:02,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. 2024-12-05T01:01:02,144 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing 57c59d9748bb6b0724486c75422f98c3 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-05T01:01:02,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-05T01:01:02,188 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17/.tmp/cf/bc01e1bbd0764b19bf37368df4351f0d is 71, key is 003f3fb6465425df47bf9135a096de05/cf:q/1733360461705/Put/seqid=0 2024-12-05T01:01:02,189 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3/.tmp/cf/4e7a0af5bf34456c905a509a1fd5126a is 71, key is 13427e8514fedb2316de107f790cc79d/cf:q/1733360461708/Put/seqid=0 2024-12-05T01:01:02,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742026_1202 (size=5354) 2024-12-05T01:01:02,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742026_1202 (size=5354) 2024-12-05T01:01:02,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742026_1202 (size=5354) 2024-12-05T01:01:02,343 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, 
pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17/.tmp/cf/bc01e1bbd0764b19bf37368df4351f0d 2024-12-05T01:01:02,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17/.tmp/cf/bc01e1bbd0764b19bf37368df4351f0d as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17/cf/bc01e1bbd0764b19bf37368df4351f0d 2024-12-05T01:01:02,367 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17/cf/bc01e1bbd0764b19bf37368df4351f0d, entries=4, sequenceid=6, filesize=5.2 K 2024-12-05T01:01:02,370 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 9a1f6e5618d93f9950f5d9b8364adc17 in 227ms, sequenceid=6, compaction requested=false 2024-12-05T01:01:02,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-05T01:01:02,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for 9a1f6e5618d93f9950f5d9b8364adc17: 2024-12-05T01:01:02,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. for snaptb0-testExportFileSystemState completed. 2024-12-05T01:01:02,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-05T01:01:02,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:01:02,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17/cf/bc01e1bbd0764b19bf37368df4351f0d] hfiles 2024-12-05T01:01:02,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17/cf/bc01e1bbd0764b19bf37368df4351f0d for snapshot=snaptb0-testExportFileSystemState 2024-12-05T01:01:02,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742027_1203 (size=8258) 2024-12-05T01:01:02,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742027_1203 (size=8258) 2024-12-05T01:01:02,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742027_1203 (size=8258) 2024-12-05T01:01:02,392 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3/.tmp/cf/4e7a0af5bf34456c905a509a1fd5126a 2024-12-05T01:01:02,402 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3/.tmp/cf/4e7a0af5bf34456c905a509a1fd5126a as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3/cf/4e7a0af5bf34456c905a509a1fd5126a 2024-12-05T01:01:02,415 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3/cf/4e7a0af5bf34456c905a509a1fd5126a, entries=46, sequenceid=6, filesize=8.1 K 2024-12-05T01:01:02,417 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 57c59d9748bb6b0724486c75422f98c3 in 273ms, sequenceid=6, compaction requested=false 2024-12-05T01:01:02,417 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for 
57c59d9748bb6b0724486c75422f98c3: 2024-12-05T01:01:02,417 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. for snaptb0-testExportFileSystemState completed. 2024-12-05T01:01:02,418 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-05T01:01:02,418 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:01:02,418 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3/cf/4e7a0af5bf34456c905a509a1fd5126a] hfiles 2024-12-05T01:01:02,418 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3/cf/4e7a0af5bf34456c905a509a1fd5126a for snapshot=snaptb0-testExportFileSystemState 2024-12-05T01:01:02,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-05T01:01:02,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742028_1204 (size=110) 2024-12-05T01:01:02,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742028_1204 (size=110) 2024-12-05T01:01:02,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742028_1204 (size=110) 2024-12-05T01:01:02,532 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. 
2024-12-05T01:01:02,532 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-05T01:01:02,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-12-05T01:01:02,533 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:02,533 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:02,538 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9a1f6e5618d93f9950f5d9b8364adc17 in 546 msec 2024-12-05T01:01:02,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742029_1205 (size=110) 2024-12-05T01:01:02,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742029_1205 (size=110) 2024-12-05T01:01:02,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742029_1205 (size=110) 2024-12-05T01:01:02,608 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. 
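[Editor's note] From 01:01:01,838 onward the SnapshotProcedure (pid=104) fans out into one SnapshotRegionProcedure per region (pid=105 and pid=106); each flushes its memstore and records region-info and HFile references in the snapshot manifest, while the client repeatedly asks "Checking to see if procedure is done pid=104". If the snapshot were submitted asynchronously instead of with the blocking call sketched earlier, that client-side polling could look roughly like the sketch below; the SnapshotDescription fields mirror the log, everything else is an assumption.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class PollSnapshotSketch {
  // The caller supplies an Admin obtained from a Connection, as in the earlier sketch.
  static void snapshotAndWait(Admin admin) throws Exception {
    SnapshotDescription snapshot = new SnapshotDescription(
        "snaptb0-testExportFileSystemState",
        TableName.valueOf("testtb-testExportFileSystemState"),
        SnapshotType.FLUSH);
    admin.snapshotAsync(snapshot);                 // submit, do not wait
    while (!admin.isSnapshotFinished(snapshot)) {  // master-side completion check
      Thread.sleep(200);                           // crude poll interval
    }
  }
}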
2024-12-05T01:01:02,608 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-05T01:01:02,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-12-05T01:01:02,609 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:02,609 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:02,612 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=106, resume processing ppid=104 2024-12-05T01:01:02,613 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 57c59d9748bb6b0724486c75422f98c3 in 622 msec 2024-12-05T01:01:02,613 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:01:02,613 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:01:02,614 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:01:02,614 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-05T01:01:02,615 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-05T01:01:02,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742030_1206 (size=630) 2024-12-05T01:01:02,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742030_1206 (size=630) 2024-12-05T01:01:02,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742030_1206 (size=630) 2024-12-05T01:01:02,753 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:01:02,799 INFO 
[PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:01:02,799 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-05T01:01:02,801 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:01:02,801 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-05T01:01:02,803 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 964 msec 2024-12-05T01:01:02,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-05T01:01:02,973 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-05T01:01:02,974 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360462974 2024-12-05T01:01:02,974 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37149, tgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360462974, rawTgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360462974, srcFsUri=hdfs://localhost:37149, srcDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:01:03,053 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37149, inputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:01:03,053 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360462974, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360462974/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-05T01:01:03,056 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): 
Verify the source snapshot's expiration status and integrity. 2024-12-05T01:01:03,082 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360462974/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-05T01:01:03,104 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0003_000001 (auth:SIMPLE) from 127.0.0.1:58708 2024-12-05T01:01:03,193 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0003/container_1733360294895_0003_01_000001/launch_container.sh] 2024-12-05T01:01:03,193 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0003/container_1733360294895_0003_01_000001/container_tokens] 2024-12-05T01:01:03,193 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0003/container_1733360294895_0003_01_000001/sysfs] 2024-12-05T01:01:03,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742031_1207 (size=630) 2024-12-05T01:01:03,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742031_1207 (size=630) 2024-12-05T01:01:03,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742031_1207 (size=630) 2024-12-05T01:01:03,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742032_1208 (size=165) 2024-12-05T01:01:03,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742032_1208 (size=165) 2024-12-05T01:01:03,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742032_1208 (size=165) 2024-12-05T01:01:03,276 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:03,277 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:03,277 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:04,625 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:01:04,876 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-11730478972859160430.jar 2024-12-05T01:01:04,877 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:04,877 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:04,986 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-2747736124435617578.jar 2024-12-05T01:01:04,987 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:04,987 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:04,988 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:04,988 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:04,992 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:04,992 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:04,993 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T01:01:04,993 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T01:01:04,993 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T01:01:04,994 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T01:01:04,994 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T01:01:04,995 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T01:01:04,995 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T01:01:04,995 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T01:01:04,996 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T01:01:04,996 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T01:01:04,996 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T01:01:04,997 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:01:04,997 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:01:04,998 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:01:04,998 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:01:04,999 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:01:05,000 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:01:05,000 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:01:05,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742033_1209 (size=24020) 2024-12-05T01:01:05,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742033_1209 (size=24020) 2024-12-05T01:01:05,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742033_1209 (size=24020) 2024-12-05T01:01:05,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742034_1210 (size=77755) 2024-12-05T01:01:05,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742034_1210 (size=77755) 2024-12-05T01:01:05,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742034_1210 (size=77755) 2024-12-05T01:01:05,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742035_1211 (size=131360) 2024-12-05T01:01:05,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742035_1211 (size=131360) 2024-12-05T01:01:05,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is 
added to blk_1073742035_1211 (size=131360) 2024-12-05T01:01:05,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742036_1212 (size=111793) 2024-12-05T01:01:05,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742036_1212 (size=111793) 2024-12-05T01:01:05,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742036_1212 (size=111793) 2024-12-05T01:01:05,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742037_1213 (size=1832290) 2024-12-05T01:01:05,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742037_1213 (size=1832290) 2024-12-05T01:01:05,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742037_1213 (size=1832290) 2024-12-05T01:01:05,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742038_1214 (size=8360282) 2024-12-05T01:01:05,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742038_1214 (size=8360282) 2024-12-05T01:01:05,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742038_1214 (size=8360282) 2024-12-05T01:01:05,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742039_1215 (size=443171) 2024-12-05T01:01:05,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742039_1215 (size=443171) 2024-12-05T01:01:05,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742039_1215 (size=443171) 2024-12-05T01:01:05,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742040_1216 (size=503880) 2024-12-05T01:01:05,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742040_1216 (size=503880) 2024-12-05T01:01:05,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742040_1216 (size=503880) 2024-12-05T01:01:05,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742041_1217 (size=322274) 2024-12-05T01:01:05,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742041_1217 (size=322274) 2024-12-05T01:01:05,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742041_1217 (size=322274) 2024-12-05T01:01:05,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742042_1218 (size=20406) 2024-12-05T01:01:05,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37797 is added to blk_1073742042_1218 (size=20406) 2024-12-05T01:01:05,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742042_1218 (size=20406) 2024-12-05T01:01:06,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742043_1219 (size=45609) 2024-12-05T01:01:06,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742043_1219 (size=45609) 2024-12-05T01:01:06,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742043_1219 (size=45609) 2024-12-05T01:01:06,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742044_1220 (size=136454) 2024-12-05T01:01:06,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742044_1220 (size=136454) 2024-12-05T01:01:06,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742044_1220 (size=136454) 2024-12-05T01:01:06,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742045_1221 (size=1597136) 2024-12-05T01:01:06,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742045_1221 (size=1597136) 2024-12-05T01:01:06,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742045_1221 (size=1597136) 2024-12-05T01:01:06,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742046_1222 (size=30873) 2024-12-05T01:01:06,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742046_1222 (size=30873) 2024-12-05T01:01:06,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742046_1222 (size=30873) 2024-12-05T01:01:06,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742047_1223 (size=29229) 2024-12-05T01:01:06,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742047_1223 (size=29229) 2024-12-05T01:01:06,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742047_1223 (size=29229) 2024-12-05T01:01:06,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742048_1224 (size=903858) 2024-12-05T01:01:06,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742048_1224 (size=903858) 2024-12-05T01:01:06,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742048_1224 (size=903858) 2024-12-05T01:01:06,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46149 is added to blk_1073742049_1225 (size=6424744) 2024-12-05T01:01:06,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742049_1225 (size=6424744) 2024-12-05T01:01:06,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742049_1225 (size=6424744) 2024-12-05T01:01:06,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742050_1226 (size=5175431) 2024-12-05T01:01:06,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742050_1226 (size=5175431) 2024-12-05T01:01:06,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742050_1226 (size=5175431) 2024-12-05T01:01:06,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742051_1227 (size=232881) 2024-12-05T01:01:06,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742051_1227 (size=232881) 2024-12-05T01:01:06,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742051_1227 (size=232881) 2024-12-05T01:01:06,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742052_1228 (size=1323991) 2024-12-05T01:01:06,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742052_1228 (size=1323991) 2024-12-05T01:01:06,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742052_1228 (size=1323991) 2024-12-05T01:01:06,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742053_1229 (size=4695811) 2024-12-05T01:01:06,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742053_1229 (size=4695811) 2024-12-05T01:01:06,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742053_1229 (size=4695811) 2024-12-05T01:01:06,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742054_1230 (size=1877034) 2024-12-05T01:01:06,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742054_1230 (size=1877034) 2024-12-05T01:01:06,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742054_1230 (size=1877034) 2024-12-05T01:01:06,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742055_1231 (size=217555) 2024-12-05T01:01:06,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742055_1231 (size=217555) 2024-12-05T01:01:06,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742055_1231 (size=217555) 2024-12-05T01:01:06,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742056_1232 (size=4188619) 2024-12-05T01:01:06,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742056_1232 (size=4188619) 2024-12-05T01:01:06,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742056_1232 (size=4188619) 2024-12-05T01:01:06,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742057_1233 (size=127628) 2024-12-05T01:01:06,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742057_1233 (size=127628) 2024-12-05T01:01:06,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742057_1233 (size=127628) 2024-12-05T01:01:06,622 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-05T01:01:06,625 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-05T01:01:06,627 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-05T01:01:06,627 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-05T01:01:06,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742058_1234 (size=447) 2024-12-05T01:01:06,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742058_1234 (size=447) 2024-12-05T01:01:06,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742058_1234 (size=447) 2024-12-05T01:01:06,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742059_1235 (size=21) 2024-12-05T01:01:06,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742059_1235 (size=21) 2024-12-05T01:01:06,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742059_1235 (size=21) 2024-12-05T01:01:06,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742060_1236 (size=304089) 2024-12-05T01:01:06,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742060_1236 (size=304089) 2024-12-05T01:01:06,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742060_1236 (size=304089) 2024-12-05T01:01:06,678 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T01:01:06,678 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T01:01:06,697 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T01:01:06,900 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0004_000001 (auth:SIMPLE) from 127.0.0.1:60964 2024-12-05T01:01:08,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-05T01:01:08,304 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-05T01:01:08,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-05T01:01:08,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-05T01:01:11,808 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0004_000001 (auth:SIMPLE) from 127.0.0.1:60086 2024-12-05T01:01:12,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742061_1237 (size=349787) 2024-12-05T01:01:12,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742061_1237 (size=349787) 2024-12-05T01:01:12,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742061_1237 (size=349787) 2024-12-05T01:01:13,807 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:01:14,077 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0004_000001 (auth:SIMPLE) from 127.0.0.1:40156 2024-12-05T01:01:14,078 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0004_000001 (auth:SIMPLE) from 127.0.0.1:49872 2024-12-05T01:01:14,469 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 57c59d9748bb6b0724486c75422f98c3 changed from -1.0 to 0.0, refreshing cache 2024-12-05T01:01:14,470 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 9a1f6e5618d93f9950f5d9b8364adc17 changed from -1.0 to 0.0, refreshing cache 2024-12-05T01:01:18,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742062_1238 (size=8258) 
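The repeated addStoredBlock entries above record each HDFS block being reported by all three datanodes of the mini cluster (127.0.0.1:37797, 127.0.0.1:46149, 127.0.0.1:41667), i.e. a replication factor of 3. As a minimal illustrative sketch (not part of the test itself), block placement for a file can be inspected with the standard Hadoop FileSystem API; the NameNode address matches the log, but the path and class name below are hypothetical:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CheckReplication {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode address taken from the log; the path below is illustrative only.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37149"), conf);
    Path p = new Path("/user/jenkins/some-export-file");   // hypothetical path
    FileStatus st = fs.getFileStatus(p);
    // One BlockLocation per block; getNames() lists the datanodes holding a replica,
    // which is what the BlockManager lines above confirm three times per block.
    for (BlockLocation loc : fs.getFileBlockLocations(st, 0, st.getLen())) {
      System.out.println(loc.getOffset() + " -> " + String.join(",", loc.getNames()));
    }
    fs.close();
  }
}
```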
2024-12-05T01:01:18,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742062_1238 (size=8258) 2024-12-05T01:01:18,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742062_1238 (size=8258) 2024-12-05T01:01:18,610 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_2/usercache/jenkins/appcache/application_1733360294895_0004/container_1733360294895_0004_01_000002/launch_container.sh] 2024-12-05T01:01:18,610 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_2/usercache/jenkins/appcache/application_1733360294895_0004/container_1733360294895_0004_01_000002/container_tokens] 2024-12-05T01:01:18,610 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_2/usercache/jenkins/appcache/application_1733360294895_0004/container_1733360294895_0004_01_000002/sysfs] 2024-12-05T01:01:19,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742064_1240 (size=5354) 2024-12-05T01:01:19,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742064_1240 (size=5354) 2024-12-05T01:01:19,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742064_1240 (size=5354) 2024-12-05T01:01:19,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742063_1239 (size=22168) 2024-12-05T01:01:19,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742063_1239 (size=22168) 2024-12-05T01:01:19,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742063_1239 (size=22168) 2024-12-05T01:01:19,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742065_1241 (size=466) 2024-12-05T01:01:19,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742065_1241 (size=466) 2024-12-05T01:01:19,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742065_1241 (size=466) 2024-12-05T01:01:19,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742066_1242 (size=22168) 2024-12-05T01:01:19,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742066_1242 (size=22168) 
2024-12-05T01:01:19,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742066_1242 (size=22168) 2024-12-05T01:01:19,857 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0004/container_1733360294895_0004_01_000003/launch_container.sh] 2024-12-05T01:01:19,857 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0004/container_1733360294895_0004_01_000003/container_tokens] 2024-12-05T01:01:19,857 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0004/container_1733360294895_0004_01_000003/sysfs] 2024-12-05T01:01:19,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742067_1243 (size=349787) 2024-12-05T01:01:19,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742067_1243 (size=349787) 2024-12-05T01:01:19,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742067_1243 (size=349787) 2024-12-05T01:01:19,877 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0004_000001 (auth:SIMPLE) from 127.0.0.1:40164 2024-12-05T01:01:21,835 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T01:01:21,836 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
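At this point the ExportSnapshot MapReduce job has finished copying hfiles and the tool moves on to "Finalize the Snapshot Export" and verification, followed by the .snapshotinfo and data.manifest listings further down. For reference, this is roughly the kind of invocation that drives such an export; the snapshot name and destination mirror the log, but the exact arguments used by the test harness may differ, and the flag spelling assumes the current org.apache.hadoop.hbase.snapshot.ExportSnapshot tool:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Snapshot name from the log; destination URI is illustrative.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testExportFileSystemState",
        "--copy-to", "hdfs://localhost:37149/export-test/export-1733360462974"
    });
    System.exit(rc);
  }
}
```

ExportSnapshot copies the snapshot manifest plus the referenced hfiles to the target filesystem and then re-verifies the exported snapshot, which is what the verification and listing entries that follow correspond to.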
2024-12-05T01:01:21,841 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-12-05T01:01:21,841 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T01:01:21,842 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T01:01:21,842 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-05T01:01:21,843 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-05T01:01:21,843 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-05T01:01:21,843 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360462974/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360462974/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-05T01:01:21,843 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360462974/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-05T01:01:21,843 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360462974/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-05T01:01:21,861 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-05T01:01:21,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-05T01:01:21,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-05T01:01:21,865 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360481864"}]},"ts":"1733360481864"} 2024-12-05T01:01:21,867 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-05T01:01:21,867 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-05T01:01:21,868 INFO [PEWorker-1 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-05T01:01:21,869 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9a1f6e5618d93f9950f5d9b8364adc17, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=57c59d9748bb6b0724486c75422f98c3, UNASSIGN}] 2024-12-05T01:01:21,870 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=57c59d9748bb6b0724486c75422f98c3, UNASSIGN 2024-12-05T01:01:21,870 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9a1f6e5618d93f9950f5d9b8364adc17, UNASSIGN 2024-12-05T01:01:21,871 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=57c59d9748bb6b0724486c75422f98c3, regionState=CLOSING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:01:21,871 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=9a1f6e5618d93f9950f5d9b8364adc17, regionState=CLOSING, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:01:21,875 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=57c59d9748bb6b0724486c75422f98c3, UNASSIGN because future has completed 2024-12-05T01:01:21,875 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:01:21,875 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure 57c59d9748bb6b0724486c75422f98c3, server=2113c16e5528,41191,1733360288910}] 2024-12-05T01:01:21,877 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9a1f6e5618d93f9950f5d9b8364adc17, UNASSIGN because future has completed 2024-12-05T01:01:21,877 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:01:21,877 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9a1f6e5618d93f9950f5d9b8364adc17, server=2113c16e5528,44435,1733360288855}] 2024-12-05T01:01:21,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-05T01:01:22,034 INFO 
[RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close 57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:22,035 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close 9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:22,035 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:01:22,035 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:01:22,035 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing 57c59d9748bb6b0724486c75422f98c3, disabling compactions & flushes 2024-12-05T01:01:22,035 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing 9a1f6e5618d93f9950f5d9b8364adc17, disabling compactions & flushes 2024-12-05T01:01:22,035 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. 2024-12-05T01:01:22,035 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. 2024-12-05T01:01:22,035 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. 2024-12-05T01:01:22,035 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. 2024-12-05T01:01:22,035 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. after waiting 0 ms 2024-12-05T01:01:22,035 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. after waiting 0 ms 2024-12-05T01:01:22,035 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. 2024-12-05T01:01:22,035 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. 
2024-12-05T01:01:22,092 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:01:22,093 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:01:22,093 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3. 2024-12-05T01:01:22,093 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for 57c59d9748bb6b0724486c75422f98c3: Waiting for close lock at 1733360482035Running coprocessor pre-close hooks at 1733360482035Disabling compacts and flushes for region at 1733360482035Disabling writes for close at 1733360482035Writing region close event to WAL at 1733360482051 (+16 ms)Running coprocessor post-close hooks at 1733360482093 (+42 ms)Closed at 1733360482093 2024-12-05T01:01:22,094 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:01:22,095 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:01:22,095 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17. 
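The pid=107..112 chain above is the server side of a single client call: DisableTableProcedure schedules CloseTableRegionsProcedure, which in turn unassigns and closes both regions of testtb-testExportFileSystemState. A minimal client-side sketch of the call that starts this chain, assuming a reachable cluster configuration (connection details are not shown in the log and the class name is illustrative):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
      if (admin.isTableEnabled(tn)) {
        // Blocks until the DisableTableProcedure (and its region-close subprocedures) completes.
        admin.disableTable(tn);
      }
    }
  }
}
```

disableTable only returns once the procedure tree above reaches SUCCESS, which is why the test can proceed directly to deleting the table afterwards.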
2024-12-05T01:01:22,095 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for 9a1f6e5618d93f9950f5d9b8364adc17: Waiting for close lock at 1733360482035Running coprocessor pre-close hooks at 1733360482035Disabling compacts and flushes for region at 1733360482035Disabling writes for close at 1733360482035Writing region close event to WAL at 1733360482053 (+18 ms)Running coprocessor post-close hooks at 1733360482095 (+42 ms)Closed at 1733360482095 2024-12-05T01:01:22,096 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed 57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:22,097 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=57c59d9748bb6b0724486c75422f98c3, regionState=CLOSED 2024-12-05T01:01:22,097 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed 9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:22,098 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=9a1f6e5618d93f9950f5d9b8364adc17, regionState=CLOSED 2024-12-05T01:01:22,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure 57c59d9748bb6b0724486c75422f98c3, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T01:01:22,103 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9a1f6e5618d93f9950f5d9b8364adc17, server=2113c16e5528,44435,1733360288855 because future has completed 2024-12-05T01:01:22,105 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=110 2024-12-05T01:01:22,105 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=110, state=SUCCESS, hasLock=false; CloseRegionProcedure 57c59d9748bb6b0724486c75422f98c3, server=2113c16e5528,41191,1733360288910 in 227 msec 2024-12-05T01:01:22,106 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=109 2024-12-05T01:01:22,106 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=57c59d9748bb6b0724486c75422f98c3, UNASSIGN in 236 msec 2024-12-05T01:01:22,106 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure 9a1f6e5618d93f9950f5d9b8364adc17, server=2113c16e5528,44435,1733360288855 in 227 msec 2024-12-05T01:01:22,109 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=109, resume processing ppid=108 2024-12-05T01:01:22,109 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9a1f6e5618d93f9950f5d9b8364adc17, UNASSIGN in 237 msec 2024-12-05T01:01:22,113 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-12-05T01:01:22,113 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 242 msec 2024-12-05T01:01:22,115 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360482115"}]},"ts":"1733360482115"} 2024-12-05T01:01:22,118 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-05T01:01:22,118 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-05T01:01:22,121 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 258 msec 2024-12-05T01:01:22,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-05T01:01:22,182 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-05T01:01:22,183 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-05T01:01:22,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-05T01:01:22,185 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-05T01:01:22,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-05T01:01:22,187 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-05T01:01:22,193 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:22,193 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-05T01:01:22,193 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:22,196 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3/cf, FileablePath, 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3/recovered.edits] 2024-12-05T01:01:22,197 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17/recovered.edits] 2024-12-05T01:01:22,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T01:01:22,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T01:01:22,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T01:01:22,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T01:01:22,199 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-05T01:01:22,199 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-05T01:01:22,199 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-05T01:01:22,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T01:01:22,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T01:01:22,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:01:22,200 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data null 2024-12-05T01:01:22,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/acl 2024-12-05T01:01:22,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T01:01:22,200 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-05T01:01:22,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:01:22,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:01:22,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-05T01:01:22,203 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3/cf/4e7a0af5bf34456c905a509a1fd5126a to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3/cf/4e7a0af5bf34456c905a509a1fd5126a 2024-12-05T01:01:22,203 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17/cf/bc01e1bbd0764b19bf37368df4351f0d to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17/cf/bc01e1bbd0764b19bf37368df4351f0d 2024-12-05T01:01:22,208 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3/recovered.edits/9.seqid 2024-12-05T01:01:22,209 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/57c59d9748bb6b0724486c75422f98c3 2024-12-05T01:01:22,209 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17/recovered.edits/9.seqid 2024-12-05T01:01:22,210 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemState/9a1f6e5618d93f9950f5d9b8364adc17 2024-12-05T01:01:22,210 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-05T01:01:22,214 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-05T01:01:22,218 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-05T01:01:22,223 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-05T01:01:22,225 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-05T01:01:22,225 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 2024-12-05T01:01:22,225 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360482225"}]},"ts":"9223372036854775807"} 2024-12-05T01:01:22,225 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360482225"}]},"ts":"9223372036854775807"} 2024-12-05T01:01:22,228 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T01:01:22,228 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 9a1f6e5618d93f9950f5d9b8364adc17, NAME => 'testtb-testExportFileSystemState,,1733360459861.9a1f6e5618d93f9950f5d9b8364adc17.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 57c59d9748bb6b0724486c75422f98c3, NAME => 'testtb-testExportFileSystemState,1,1733360459861.57c59d9748bb6b0724486c75422f98c3.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T01:01:22,229 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 
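With the table disabled, DeleteTableProcedure archives the region directories, removes the region rows from hbase:meta and drops the table descriptor; the entries that follow then show the test deleting its two snapshots as well. A hedged sketch of the equivalent client-side calls (class name and connection setup are illustrative, not taken from the test):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CleanupExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
      if (admin.tableExists(tn)) {
        if (admin.isTableEnabled(tn)) {
          admin.disableTable(tn);   // deleteTable requires a disabled table
        }
        admin.deleteTable(tn);      // triggers DeleteTableProcedure: archive regions, clean META
      }
      // Snapshot cleanup, matching the delete requests visible later in the log.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testExportFileSystemState");
    }
  }
}
```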
2024-12-05T01:01:22,229 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733360482229"}]},"ts":"9223372036854775807"} 2024-12-05T01:01:22,231 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-12-05T01:01:22,232 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-05T01:01:22,234 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 49 msec 2024-12-05T01:01:22,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-05T01:01:22,313 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-12-05T01:01:22,313 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-05T01:01:22,333 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-12-05T01:01:22,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-05T01:01:22,349 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-12-05T01:01:22,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-05T01:01:22,388 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=801 (was 789) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:51992 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46537 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:42102 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: Thread-3590 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 23197) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:46537 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:53040 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_725709206_1 at /127.0.0.1:58316 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_725709206_1 at /127.0.0.1:39940 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=803 (was 806), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=710 (was 616) - SystemLoadAverage LEAK? 
-, ProcessCount=22 (was 29), AvailableMemoryMB=5600 (was 5737) 2024-12-05T01:01:22,388 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-12-05T01:01:22,415 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=801, OpenFileDescriptor=803, MaxFileDescriptor=1048576, SystemLoadAverage=710, ProcessCount=24, AvailableMemoryMB=5594 2024-12-05T01:01:22,415 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-12-05T01:01:22,417 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T01:01:22,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-05T01:01:22,420 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T01:01:22,420 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:01:22,421 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-12-05T01:01:22,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-05T01:01:22,424 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T01:01:22,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742068_1244 (size=404) 2024-12-05T01:01:22,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742068_1244 (size=404) 2024-12-05T01:01:22,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742068_1244 (size=404) 2024-12-05T01:01:22,465 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7d542f0249a2e2b0d1794004da037d9b, NAME => 'testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:01:22,466 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => e5bf89c1ca44706b370e34cb6fcd5140, NAME => 'testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:01:22,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742070_1246 (size=65) 2024-12-05T01:01:22,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742070_1246 (size=65) 2024-12-05T01:01:22,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742070_1246 (size=65) 2024-12-05T01:01:22,513 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:01:22,513 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 7d542f0249a2e2b0d1794004da037d9b, disabling compactions & flushes 2024-12-05T01:01:22,513 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. 2024-12-05T01:01:22,513 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. 2024-12-05T01:01:22,513 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. after waiting 0 ms 2024-12-05T01:01:22,513 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. 2024-12-05T01:01:22,513 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. 
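The create request logged at 01:01:22,417 ('testtb-testConsecutiveExports' with REGION_REPLICATION => '1' and a single family 'cf' with VERSIONS => '1', BLOCKSIZE => 65536) and the two regions that follow (start/end keys '' | '1' | '') correspond to an Admin.createTable call with one split key. A minimal, hypothetical Java sketch of an equivalent client call; the helper name and the open Connection `conn` are assumptions, not the test's actual code:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper; `conn` is an already-open Connection to the cluster under test.
static void createExportTable(Connection conn) throws IOException {
  TableName tn = TableName.valueOf("testtb-testConsecutiveExports");
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn)
      .setRegionReplication(1)                                   // REGION_REPLICATION => '1'
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)                                     // VERSIONS => '1'
          .setBlocksize(65536)                                   // BLOCKSIZE => '65536'
          .build())
      .build();
  try (Admin admin = conn.getAdmin()) {
    // A single split key of '1' produces the two regions seen in the log:
    // ('', '1') and ('1', '').
    admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
  }
}
```

Attributes not set explicitly here (BLOOMFILTER, TTL, COMPRESSION, and so on) fall back to the defaults printed in the descriptor above.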
2024-12-05T01:01:22,513 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7d542f0249a2e2b0d1794004da037d9b: Waiting for close lock at 1733360482513Disabling compacts and flushes for region at 1733360482513Disabling writes for close at 1733360482513Writing region close event to WAL at 1733360482513Closed at 1733360482513 2024-12-05T01:01:22,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742069_1245 (size=65) 2024-12-05T01:01:22,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742069_1245 (size=65) 2024-12-05T01:01:22,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742069_1245 (size=65) 2024-12-05T01:01:22,529 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:01:22,529 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing e5bf89c1ca44706b370e34cb6fcd5140, disabling compactions & flushes 2024-12-05T01:01:22,529 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. 2024-12-05T01:01:22,529 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. 2024-12-05T01:01:22,529 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. after waiting 0 ms 2024-12-05T01:01:22,529 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. 2024-12-05T01:01:22,530 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. 
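The repeated "Checking to see if procedure is done pid=114" entries around this point show the client polling the master until CreateTableProcedure 114 completes. The blocking createTable call performs that polling internally; the asynchronous variant makes the wait explicit. A hedged sketch, reusing the `desc` built in the previous snippet (exception handling simplified):

```java
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical sketch; `desc` is the descriptor from the previous snippet.
static void createAndWait(Connection conn, TableDescriptor desc) throws Exception {
  try (Admin admin = conn.getAdmin()) {
    Future<Void> created = admin.createTableAsync(desc, new byte[][] { Bytes.toBytes("1") });
    // The blocking createTable call does the same wait internally, which is what the
    // master logs as "Checking to see if procedure is done pid=114".
    created.get(60, TimeUnit.SECONDS);
  }
}
```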
2024-12-05T01:01:22,530 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for e5bf89c1ca44706b370e34cb6fcd5140: Waiting for close lock at 1733360482529Disabling compacts and flushes for region at 1733360482529Disabling writes for close at 1733360482529Writing region close event to WAL at 1733360482529Closed at 1733360482529 2024-12-05T01:01:22,531 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T01:01:22,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-05T01:01:22,531 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733360482531"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360482531"}]},"ts":"1733360482531"} 2024-12-05T01:01:22,531 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733360482531"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360482531"}]},"ts":"1733360482531"} 2024-12-05T01:01:22,535 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T01:01:22,536 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T01:01:22,537 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360482536"}]},"ts":"1733360482536"} 2024-12-05T01:01:22,544 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-05T01:01:22,544 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {2113c16e5528=0} racks are {/default-rack=0} 2024-12-05T01:01:22,546 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T01:01:22,546 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T01:01:22,546 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T01:01:22,546 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T01:01:22,546 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T01:01:22,546 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T01:01:22,546 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T01:01:22,546 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T01:01:22,546 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T01:01:22,546 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T01:01:22,546 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7d542f0249a2e2b0d1794004da037d9b, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e5bf89c1ca44706b370e34cb6fcd5140, ASSIGN}] 2024-12-05T01:01:22,548 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e5bf89c1ca44706b370e34cb6fcd5140, ASSIGN 2024-12-05T01:01:22,548 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7d542f0249a2e2b0d1794004da037d9b, ASSIGN 2024-12-05T01:01:22,549 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e5bf89c1ca44706b370e34cb6fcd5140, ASSIGN; state=OFFLINE, location=2113c16e5528,32927,1733360288712; forceNewPlan=false, retain=false 2024-12-05T01:01:22,549 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7d542f0249a2e2b0d1794004da037d9b, ASSIGN; state=OFFLINE, location=2113c16e5528,41191,1733360288910; forceNewPlan=false, retain=false 2024-12-05T01:01:22,699 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
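Once the two ASSIGN subprocedures above (pid=115 and pid=116) complete, each region has a concrete location recorded in hbase:meta. A hypothetical sketch of how a client can observe that placement, assuming the same open Connection `conn`:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

// Hypothetical check of where the two new regions ended up.
static void printRegionLocations(Connection conn) throws IOException {
  try (RegionLocator locator =
      conn.getRegionLocator(TableName.valueOf("testtb-testConsecutiveExports"))) {
    for (HRegionLocation loc : locator.getAllRegionLocations()) {
      System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
    }
  }
}
```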
2024-12-05T01:01:22,700 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=e5bf89c1ca44706b370e34cb6fcd5140, regionState=OPENING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:01:22,700 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=7d542f0249a2e2b0d1794004da037d9b, regionState=OPENING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:01:22,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7d542f0249a2e2b0d1794004da037d9b, ASSIGN because future has completed 2024-12-05T01:01:22,703 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7d542f0249a2e2b0d1794004da037d9b, server=2113c16e5528,41191,1733360288910}] 2024-12-05T01:01:22,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e5bf89c1ca44706b370e34cb6fcd5140, ASSIGN because future has completed 2024-12-05T01:01:22,708 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure e5bf89c1ca44706b370e34cb6fcd5140, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:01:22,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-05T01:01:22,864 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. 2024-12-05T01:01:22,865 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => 7d542f0249a2e2b0d1794004da037d9b, NAME => 'testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T01:01:22,865 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. service=AccessControlService 2024-12-05T01:01:22,865 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
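The region open path above registers an AccessControlService coprocessor endpoint because this cluster runs with the AccessController coprocessor loaded. The log does not show how the test harness configures that, so the following is only a generic, illustrative way to enable it, not this test's actual setup:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Generic, illustrative ACL setup; these are standard HBase settings, but the
// harness's real configuration is not visible in this log.
static Configuration aclEnabledConf() {
  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.coprocessor.master.classes",
      "org.apache.hadoop.hbase.security.access.AccessController");
  conf.set("hbase.coprocessor.region.classes",
      "org.apache.hadoop.hbase.security.access.AccessController");
  conf.set("hbase.coprocessor.regionserver.classes",
      "org.apache.hadoop.hbase.security.access.AccessController");
  conf.setBoolean("hbase.security.authorization", true);
  conf.setBoolean("hbase.security.exec.permission.checks", true);
  return conf;
}
```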
2024-12-05T01:01:22,865 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:01:22,865 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:01:22,866 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for 7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:01:22,866 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for 7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:01:22,876 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. 2024-12-05T01:01:22,876 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => e5bf89c1ca44706b370e34cb6fcd5140, NAME => 'testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T01:01:22,876 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. service=AccessControlService 2024-12-05T01:01:22,877 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
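The creation entries earlier recorded the table-level METADATA key 'hbase.store.file-tracker.impl' => 'DEFAULT', which is why StoreFileTrackerFactory instantiates DefaultStoreFileTracker during these opens. A hypothetical check that reads the setting back through the Admin API, again assuming `conn`:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;

// Hypothetical check of the store file tracker recorded in the table metadata.
static void printStoreFileTracker(Connection conn) throws IOException {
  try (Admin admin = conn.getAdmin()) {
    TableDescriptor td =
        admin.getDescriptor(TableName.valueOf("testtb-testConsecutiveExports"));
    // Expected to print DEFAULT, matching the METADATA entry in the creation log.
    System.out.println(td.getValue("hbase.store.file-tracker.impl"));
  }
}
```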
2024-12-05T01:01:22,877 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:01:22,877 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:01:22,877 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:01:22,877 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:01:22,893 INFO [StoreOpener-7d542f0249a2e2b0d1794004da037d9b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:01:22,894 INFO [StoreOpener-7d542f0249a2e2b0d1794004da037d9b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7d542f0249a2e2b0d1794004da037d9b columnFamilyName cf 2024-12-05T01:01:22,895 DEBUG [StoreOpener-7d542f0249a2e2b0d1794004da037d9b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:01:22,895 INFO [StoreOpener-7d542f0249a2e2b0d1794004da037d9b-1 {}] regionserver.HStore(327): Store=7d542f0249a2e2b0d1794004da037d9b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:01:22,896 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for 7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:01:22,896 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:01:22,897 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:01:22,897 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for 7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:01:22,897 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for 7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:01:22,900 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1093): writing seq id for 7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:01:22,904 INFO [StoreOpener-e5bf89c1ca44706b370e34cb6fcd5140-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:01:22,907 INFO [StoreOpener-e5bf89c1ca44706b370e34cb6fcd5140-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e5bf89c1ca44706b370e34cb6fcd5140 columnFamilyName cf 2024-12-05T01:01:22,907 DEBUG [StoreOpener-e5bf89c1ca44706b370e34cb6fcd5140-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:01:22,907 INFO [StoreOpener-e5bf89c1ca44706b370e34cb6fcd5140-1 {}] regionserver.HStore(327): Store=e5bf89c1ca44706b370e34cb6fcd5140/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:01:22,908 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:01:22,910 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:01:22,912 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:01:22,913 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for e5bf89c1ca44706b370e34cb6fcd5140 
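While opening each region, the region server looks for recovered.edits files under the region directory on HDFS (none are found here, and a fresh recovered.edits/1.seqid is written shortly afterwards). A hypothetical sketch that lists the same region directory via the Hadoop FileSystem API, using the path printed above:

```java
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical listing of the region directory named in the log above.
static void listRegionDir() throws IOException {
  FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37149"), new Configuration());
  Path regionDir = new Path("/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11"
      + "/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b");
  for (FileStatus st : fs.listStatus(regionDir)) {
    System.out.println(st.getPath()); // expect the cf/ family dir and recovered.edits/
  }
}
```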
2024-12-05T01:01:22,913 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:01:22,921 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:01:22,924 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:01:22,925 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened 7d542f0249a2e2b0d1794004da037d9b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74567693, jitterRate=0.11114521324634552}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:01:22,925 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:01:22,926 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for 7d542f0249a2e2b0d1794004da037d9b: Running coprocessor pre-open hook at 1733360482866Writing region info on filesystem at 1733360482866Initializing all the Stores at 1733360482868 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360482868Cleaning up temporary data from old regions at 1733360482897 (+29 ms)Running coprocessor post-open hooks at 1733360482925 (+28 ms)Region opened successfully at 1733360482926 (+1 ms) 2024-12-05T01:01:22,928 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b., pid=117, masterSystemTime=1733360482859 2024-12-05T01:01:22,930 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. 2024-12-05T01:01:22,930 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. 
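The opened region reports a SteppingSplitPolicy layered over IncreasingToUpperBoundRegionSplitPolicy and ConstantSizeRegionSplitPolicy with a jittered desiredMaxFileSize; those are the cluster defaults. Purely for illustration, a table can pin its own split policy and maximum file size in its descriptor; nothing in this log suggests the test overrides the defaults:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

// Illustrative only: pinning a split policy and max file size per table.
static TableDescriptor customSplitPolicyDescriptor() {
  return TableDescriptorBuilder
      .newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
      .setRegionSplitPolicyClassName(
          "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy")
      .setMaxFileSize(256L * 1024 * 1024)  // 256 MB, an arbitrary example value
      .build();
}
```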
2024-12-05T01:01:22,931 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=7d542f0249a2e2b0d1794004da037d9b, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:01:22,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7d542f0249a2e2b0d1794004da037d9b, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T01:01:22,937 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=115 2024-12-05T01:01:22,937 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 7d542f0249a2e2b0d1794004da037d9b, server=2113c16e5528,41191,1733360288910 in 232 msec 2024-12-05T01:01:22,939 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7d542f0249a2e2b0d1794004da037d9b, ASSIGN in 391 msec 2024-12-05T01:01:22,944 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:01:22,945 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened e5bf89c1ca44706b370e34cb6fcd5140; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73126449, jitterRate=0.08966900408267975}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:01:22,945 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:01:22,945 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for e5bf89c1ca44706b370e34cb6fcd5140: Running coprocessor pre-open hook at 1733360482877Writing region info on filesystem at 1733360482877Initializing all the Stores at 1733360482890 (+13 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360482890Cleaning up temporary data from old regions at 1733360482913 (+23 ms)Running coprocessor post-open hooks at 1733360482945 (+32 ms)Region opened successfully at 1733360482945 2024-12-05T01:01:22,947 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140., pid=118, masterSystemTime=1733360482862 2024-12-05T01:01:22,949 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished 
post open deploy task for testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. 2024-12-05T01:01:22,949 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. 2024-12-05T01:01:22,950 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=e5bf89c1ca44706b370e34cb6fcd5140, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:01:22,955 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure e5bf89c1ca44706b370e34cb6fcd5140, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:01:22,958 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=116 2024-12-05T01:01:22,958 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure e5bf89c1ca44706b370e34cb6fcd5140, server=2113c16e5528,32927,1733360288712 in 248 msec 2024-12-05T01:01:22,961 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=116, resume processing ppid=114 2024-12-05T01:01:22,961 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e5bf89c1ca44706b370e34cb6fcd5140, ASSIGN in 413 msec 2024-12-05T01:01:22,962 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T01:01:22,963 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360482962"}]},"ts":"1733360482962"} 2024-12-05T01:01:22,966 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-05T01:01:22,968 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T01:01:22,969 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-05T01:01:22,972 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-05T01:01:22,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:01:22,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:01:22,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:01:22,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:01:22,977 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-05T01:01:22,977 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-05T01:01:22,978 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-05T01:01:22,978 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-05T01:01:22,979 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 561 msec 2024-12-05T01:01:23,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-05T01:01:23,052 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-05T01:01:23,053 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-12-05T01:01:23,053 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:01:23,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-12-05T01:01:23,058 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:01:23,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testConsecutiveExports assigned. 
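After HBaseTestingUtil confirms that both regions are assigned, the test asks the master for a FLUSH-type snapshot named emptySnaptb0-testConsecutiveExports (see the "snapshot request for:{ ss=... type=FLUSH ttl=0 }" entry below). A hypothetical sketch of that client call, assuming the same `conn`:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotType;

// Hypothetical client side of the snapshot request logged below.
static void takeEmptySnapshot(Connection conn) throws IOException {
  try (Admin admin = conn.getAdmin()) {
    admin.snapshot("emptySnaptb0-testConsecutiveExports",
        TableName.valueOf("testtb-testConsecutiveExports"),
        SnapshotType.FLUSH);   // matches "type=FLUSH ttl=0" in the master log
  }
}
```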
2024-12-05T01:01:23,058 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-05T01:01:23,062 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-05T01:01:23,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360483063 (current time:1733360483063). 2024-12-05T01:01:23,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:01:23,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-05T01:01:23,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:01:23,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c534c7d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:23,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:01:23,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:01:23,071 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:01:23,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:01:23,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:01:23,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29397d35, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:23,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:01:23,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:01:23,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:23,073 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:51934, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:01:23,074 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66236206, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:23,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:01:23,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:01:23,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:01:23,077 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46046, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:01:23,078 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 2024-12-05T01:01:23,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:01:23,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:23,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:23,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76a6ae88, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:23,083 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:01:23,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:01:23,084 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:01:23,085 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:01:23,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:01:23,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:01:23,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d18581a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:23,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:01:23,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:01:23,086 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:23,089 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51954, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:01:23,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c712dc2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:23,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:01:23,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:01:23,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:01:23,096 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46048, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:01:23,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is 
[region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:01:23,100 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 2024-12-05T01:01:23,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:01:23,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:23,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:23,101 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
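The call stack above shows SnapshotDescriptionUtils.writeAclToSnapshotDescription reading the table's permissions (via PermissionStorage.getTablePermissions) so the snapshot description carries the ACL that was written earlier ("jenkins: RWXCA"). A hypothetical way to inspect the same entries from a client with AccessControlClient; note the call declares throws Throwable, and error handling is omitted:

```java
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

// Hypothetical inspection of the ACL the master reads back above.
static void printTableAcl(Connection conn) throws Throwable {
  for (UserPermission p :
      AccessControlClient.getUserPermissions(conn, "testtb-testConsecutiveExports")) {
    System.out.println(p);   // expect an entry for user jenkins with actions RWXCA
  }
}
```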
2024-12-05T01:01:23,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-05T01:01:23,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T01:01:23,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-05T01:01:23,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-05T01:01:23,107 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:01:23,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-05T01:01:23,109 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:01:23,112 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:01:23,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742071_1247 (size=161) 2024-12-05T01:01:23,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742071_1247 (size=161) 2024-12-05T01:01:23,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742071_1247 (size=161) 2024-12-05T01:01:23,197 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:01:23,197 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d542f0249a2e2b0d1794004da037d9b}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e5bf89c1ca44706b370e34cb6fcd5140}] 2024-12-05T01:01:23,198 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, 
ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:01:23,199 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:01:23,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-05T01:01:23,352 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32927 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-12-05T01:01:23,352 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41191 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-12-05T01:01:23,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. 2024-12-05T01:01:23,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. 2024-12-05T01:01:23,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for 7d542f0249a2e2b0d1794004da037d9b: 2024-12-05T01:01:23,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for e5bf89c1ca44706b370e34cb6fcd5140: 2024-12-05T01:01:23,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. for emptySnaptb0-testConsecutiveExports completed. 2024-12-05T01:01:23,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. for emptySnaptb0-testConsecutiveExports completed. 2024-12-05T01:01:23,353 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-05T01:01:23,353 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-05T01:01:23,353 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:01:23,353 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:01:23,353 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:01:23,353 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:01:23,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742072_1248 (size=68) 2024-12-05T01:01:23,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742072_1248 (size=68) 2024-12-05T01:01:23,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742072_1248 (size=68) 2024-12-05T01:01:23,383 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. 2024-12-05T01:01:23,383 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-12-05T01:01:23,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-12-05T01:01:23,383 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:01:23,384 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:01:23,387 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7d542f0249a2e2b0d1794004da037d9b in 188 msec 2024-12-05T01:01:23,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742073_1249 (size=68) 2024-12-05T01:01:23,397 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. 
2024-12-05T01:01:23,398 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-05T01:01:23,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-12-05T01:01:23,398 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:01:23,399 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:01:23,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742073_1249 (size=68) 2024-12-05T01:01:23,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742073_1249 (size=68) 2024-12-05T01:01:23,402 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=119 2024-12-05T01:01:23,402 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e5bf89c1ca44706b370e34cb6fcd5140 in 202 msec 2024-12-05T01:01:23,402 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:01:23,403 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:01:23,403 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:01:23,403 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-05T01:01:23,404 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-05T01:01:23,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-05T01:01:23,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742074_1250 (size=543) 2024-12-05T01:01:23,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742074_1250 (size=543) 2024-12-05T01:01:23,513 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742074_1250 (size=543) 2024-12-05T01:01:23,544 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:01:23,572 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:01:23,573 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-05T01:01:23,575 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:01:23,575 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-05T01:01:23,577 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 469 msec 2024-12-05T01:01:23,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-05T01:01:23,732 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-05T01:01:23,745 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='021f2b214f6c0ec9d1e578f96add984d2', locateType=CURRENT is [region=testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:01:23,752 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='204394f727ee8511cd53dc184870553e2', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:01:23,753 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='3cab65637827f70823e206dcdb9a23309', locateType=CURRENT is 
[region=testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:01:23,754 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='42709d10ce66ef81b1dbd2a9dd42880e0', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:01:23,755 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='528094ad1e8dc382e04cd228e1178a87d', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:01:23,756 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='1fe8f64d431fa09cdb969603457cca172', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:01:23,761 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='6c39ccd09f5c202d1a5ecbb46b71bdb1d', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:01:23,764 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='0386e97a6d0d2e8e1d66198953770053', locateType=CURRENT is [region=testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:01:23,780 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41191 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:01:23,786 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:01:23,787 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-05T01:01:23,802 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-05T01:01:23,802 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. 
2024-12-05T01:01:23,806 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:01:23,809 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-05T01:01:23,818 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-05T01:01:23,827 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-05T01:01:23,830 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-05T01:01:23,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360483830 (current time:1733360483830). 2024-12-05T01:01:23,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:01:23,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-05T01:01:23,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:01:23,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fe3b08b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:23,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:01:23,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:01:23,833 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:01:23,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:01:23,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:01:23,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@724b5a9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-05T01:01:23,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:01:23,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:01:23,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:23,837 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51972, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:01:23,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15d86cd7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:23,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:01:23,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:01:23,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:01:23,841 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46058, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:01:23,843 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 
2024-12-05T01:01:23,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-05T01:01:23,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T01:01:23,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T01:01:23,844 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-05T01:01:23,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d638941, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-05T01:01:23,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id
2024-12-05T01:01:23,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-12-05T01:01:23,846 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392'
2024-12-05T01:01:23,846 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-12-05T01:01:23,846 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392"
2024-12-05T01:01:23,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@854ab08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-05T01:01:23,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to
use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:01:23,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:01:23,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:01:23,848 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51998, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:01:23,849 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45245981, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:01:23,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:01:23,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:01:23,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:01:23,853 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46060, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:01:23,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:01:23,856 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 
2024-12-05T01:01:23,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-05T01:01:23,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T01:01:23,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T01:01:23,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA]
2024-12-05T01:01:23,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot...
2024-12-05T01:01:23,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-05T01:01:23,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-05T01:01:23,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-05T01:01:23,860 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:01:23,863 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:01:23,867 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:01:23,878 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T01:01:23,898 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-05T01:01:23,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742075_1251 (size=156) 2024-12-05T01:01:23,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742075_1251 (size=156) 2024-12-05T01:01:23,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742075_1251 (size=156) 2024-12-05T01:01:23,964 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:01:23,964 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d542f0249a2e2b0d1794004da037d9b}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e5bf89c1ca44706b370e34cb6fcd5140}] 2024-12-05T01:01:23,966 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:01:23,966 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:01:23,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-05T01:01:24,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41191 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-05T01:01:24,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32927 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-12-05T01:01:24,118 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. 2024-12-05T01:01:24,119 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing 7d542f0249a2e2b0d1794004da037d9b 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-05T01:01:24,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. 
2024-12-05T01:01:24,120 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing e5bf89c1ca44706b370e34cb6fcd5140 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-05T01:01:24,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140/.tmp/cf/b31e97dc1ffe4492b17d12d48b0251fa is 71, key is 18fda13f3a546f29e82fa980b95b3b7e/cf:q/1733360483785/Put/seqid=0 2024-12-05T01:01:24,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b/.tmp/cf/8bb6514d307e417b9c4ce86954718580 is 71, key is 0239263671af37b693b2cfd5c319ca1c/cf:q/1733360483779/Put/seqid=0 2024-12-05T01:01:24,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-05T01:01:24,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742076_1252 (size=5356) 2024-12-05T01:01:24,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742076_1252 (size=5356) 2024-12-05T01:01:24,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742076_1252 (size=5356) 2024-12-05T01:01:24,272 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b/.tmp/cf/8bb6514d307e417b9c4ce86954718580 2024-12-05T01:01:24,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b/.tmp/cf/8bb6514d307e417b9c4ce86954718580 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b/cf/8bb6514d307e417b9c4ce86954718580 2024-12-05T01:01:24,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742077_1253 (size=8256) 2024-12-05T01:01:24,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742077_1253 (size=8256) 2024-12-05T01:01:24,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742077_1253 (size=8256) 2024-12-05T01:01:24,290 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140/.tmp/cf/b31e97dc1ffe4492b17d12d48b0251fa 2024-12-05T01:01:24,299 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b/cf/8bb6514d307e417b9c4ce86954718580, entries=4, sequenceid=6, filesize=5.2 K 2024-12-05T01:01:24,300 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 7d542f0249a2e2b0d1794004da037d9b in 181ms, sequenceid=6, compaction requested=false 2024-12-05T01:01:24,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): Flush status journal for 7d542f0249a2e2b0d1794004da037d9b: 2024-12-05T01:01:24,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. for snaptb0-testConsecutiveExports completed. 2024-12-05T01:01:24,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-05T01:01:24,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:01:24,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b/cf/8bb6514d307e417b9c4ce86954718580] hfiles 2024-12-05T01:01:24,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b/cf/8bb6514d307e417b9c4ce86954718580 for snapshot=snaptb0-testConsecutiveExports 2024-12-05T01:01:24,302 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140/.tmp/cf/b31e97dc1ffe4492b17d12d48b0251fa as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140/cf/b31e97dc1ffe4492b17d12d48b0251fa 2024-12-05T01:01:24,309 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140/cf/b31e97dc1ffe4492b17d12d48b0251fa, entries=46, sequenceid=6, filesize=8.1 K 2024-12-05T01:01:24,318 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for e5bf89c1ca44706b370e34cb6fcd5140 in 198ms, sequenceid=6, compaction requested=false 2024-12-05T01:01:24,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status journal for e5bf89c1ca44706b370e34cb6fcd5140: 2024-12-05T01:01:24,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. for snaptb0-testConsecutiveExports completed. 2024-12-05T01:01:24,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-05T01:01:24,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:01:24,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140/cf/b31e97dc1ffe4492b17d12d48b0251fa] hfiles 2024-12-05T01:01:24,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140/cf/b31e97dc1ffe4492b17d12d48b0251fa for snapshot=snaptb0-testConsecutiveExports 2024-12-05T01:01:24,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742078_1254 (size=107) 2024-12-05T01:01:24,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742078_1254 (size=107) 2024-12-05T01:01:24,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742078_1254 (size=107) 2024-12-05T01:01:24,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. 
2024-12-05T01:01:24,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-05T01:01:24,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-12-05T01:01:24,470 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:01:24,470 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:01:24,474 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7d542f0249a2e2b0d1794004da037d9b in 507 msec 2024-12-05T01:01:24,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-05T01:01:24,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742079_1255 (size=107) 2024-12-05T01:01:24,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742079_1255 (size=107) 2024-12-05T01:01:24,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742079_1255 (size=107) 2024-12-05T01:01:24,575 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. 
2024-12-05T01:01:24,575 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124 2024-12-05T01:01:24,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=124 2024-12-05T01:01:24,576 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:01:24,576 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:01:24,582 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=124, resume processing ppid=122 2024-12-05T01:01:24,582 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e5bf89c1ca44706b370e34cb6fcd5140 in 614 msec 2024-12-05T01:01:24,582 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:01:24,583 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:01:24,584 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:01:24,584 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-05T01:01:24,585 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-05T01:01:24,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742080_1256 (size=621) 2024-12-05T01:01:24,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742080_1256 (size=621) 2024-12-05T01:01:24,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742080_1256 (size=621) 2024-12-05T01:01:24,806 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:01:24,833 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:01:24,833 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-05T01:01:24,851 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:01:24,851 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-05T01:01:24,853 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 993 msec 2024-12-05T01:01:25,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-05T01:01:25,003 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-05T01:01:25,003 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003 2024-12-05T01:01:25,003 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003, srcFsUri=hdfs://localhost:37149, srcDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:01:25,069 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37149, inputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:01:25,070 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@5a301e32, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003, skipTmp=false, 
initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-05T01:01:25,074 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T01:01:25,126 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-05T01:01:25,272 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:25,272 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:25,273 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:26,084 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0004_000001 (auth:SIMPLE) from 127.0.0.1:45946 2024-12-05T01:01:26,119 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_2/usercache/jenkins/appcache/application_1733360294895_0004/container_1733360294895_0004_01_000001/launch_container.sh] 2024-12-05T01:01:26,119 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_2/usercache/jenkins/appcache/application_1733360294895_0004/container_1733360294895_0004_01_000001/container_tokens] 2024-12-05T01:01:26,119 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_2/usercache/jenkins/appcache/application_1733360294895_0004/container_1733360294895_0004_01_000001/sysfs] 2024-12-05T01:01:27,084 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-7543450564150198412.jar 2024-12-05T01:01:27,085 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:27,085 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:27,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-1044419128468472534.jar 2024-12-05T01:01:27,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:27,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:27,212 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:27,212 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:27,212 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:27,213 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:27,213 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T01:01:27,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T01:01:27,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T01:01:27,214 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T01:01:27,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T01:01:27,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T01:01:27,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T01:01:27,216 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T01:01:27,216 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T01:01:27,216 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T01:01:27,217 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T01:01:27,217 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:01:27,218 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:01:27,218 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:01:27,218 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:01:27,219 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:01:27,219 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:01:27,220 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:01:27,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742081_1257 (size=24020) 2024-12-05T01:01:27,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742081_1257 (size=24020) 2024-12-05T01:01:27,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742081_1257 (size=24020) 2024-12-05T01:01:27,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742082_1258 (size=6424744) 2024-12-05T01:01:27,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742082_1258 (size=6424744) 2024-12-05T01:01:27,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742082_1258 (size=6424744) 2024-12-05T01:01:27,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742083_1259 (size=77755) 2024-12-05T01:01:27,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742083_1259 (size=77755) 2024-12-05T01:01:27,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742083_1259 (size=77755) 2024-12-05T01:01:27,646 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:01:27,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742084_1260 (size=131360) 2024-12-05T01:01:27,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742084_1260 (size=131360) 2024-12-05T01:01:27,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742084_1260 (size=131360) 2024-12-05T01:01:27,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added 
to blk_1073742085_1261 (size=111793) 2024-12-05T01:01:27,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742085_1261 (size=111793) 2024-12-05T01:01:27,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742085_1261 (size=111793) 2024-12-05T01:01:27,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742086_1262 (size=1832290) 2024-12-05T01:01:27,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742086_1262 (size=1832290) 2024-12-05T01:01:27,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742086_1262 (size=1832290) 2024-12-05T01:01:27,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742087_1263 (size=8360282) 2024-12-05T01:01:27,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742087_1263 (size=8360282) 2024-12-05T01:01:27,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742087_1263 (size=8360282) 2024-12-05T01:01:27,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742088_1264 (size=503880) 2024-12-05T01:01:27,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742088_1264 (size=503880) 2024-12-05T01:01:27,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742088_1264 (size=503880) 2024-12-05T01:01:27,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742089_1265 (size=322274) 2024-12-05T01:01:27,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742089_1265 (size=322274) 2024-12-05T01:01:27,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742089_1265 (size=322274) 2024-12-05T01:01:27,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742090_1266 (size=20406) 2024-12-05T01:01:27,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742090_1266 (size=20406) 2024-12-05T01:01:27,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742090_1266 (size=20406) 2024-12-05T01:01:27,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742091_1267 (size=45609) 2024-12-05T01:01:27,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742091_1267 (size=45609) 2024-12-05T01:01:27,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is 
added to blk_1073742091_1267 (size=45609) 2024-12-05T01:01:28,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742092_1268 (size=136454) 2024-12-05T01:01:28,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742092_1268 (size=136454) 2024-12-05T01:01:28,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742092_1268 (size=136454) 2024-12-05T01:01:28,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742093_1269 (size=1597136) 2024-12-05T01:01:28,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742093_1269 (size=1597136) 2024-12-05T01:01:28,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742093_1269 (size=1597136) 2024-12-05T01:01:28,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742094_1270 (size=30873) 2024-12-05T01:01:28,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742094_1270 (size=30873) 2024-12-05T01:01:28,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742094_1270 (size=30873) 2024-12-05T01:01:28,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742095_1271 (size=29229) 2024-12-05T01:01:28,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742095_1271 (size=29229) 2024-12-05T01:01:28,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742095_1271 (size=29229) 2024-12-05T01:01:28,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742096_1272 (size=903858) 2024-12-05T01:01:28,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742096_1272 (size=903858) 2024-12-05T01:01:28,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742096_1272 (size=903858) 2024-12-05T01:01:28,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742097_1273 (size=5175431) 2024-12-05T01:01:28,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742097_1273 (size=5175431) 2024-12-05T01:01:28,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742097_1273 (size=5175431) 2024-12-05T01:01:28,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742098_1274 (size=232881) 2024-12-05T01:01:28,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37797 is added to blk_1073742098_1274 (size=232881) 2024-12-05T01:01:28,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742098_1274 (size=232881) 2024-12-05T01:01:28,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742099_1275 (size=1323991) 2024-12-05T01:01:28,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742099_1275 (size=1323991) 2024-12-05T01:01:28,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742099_1275 (size=1323991) 2024-12-05T01:01:28,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742100_1276 (size=4695811) 2024-12-05T01:01:28,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742100_1276 (size=4695811) 2024-12-05T01:01:28,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742100_1276 (size=4695811) 2024-12-05T01:01:28,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742101_1277 (size=1877034) 2024-12-05T01:01:28,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742101_1277 (size=1877034) 2024-12-05T01:01:28,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742101_1277 (size=1877034) 2024-12-05T01:01:28,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742102_1278 (size=443171) 2024-12-05T01:01:28,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742102_1278 (size=443171) 2024-12-05T01:01:28,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742102_1278 (size=443171) 2024-12-05T01:01:28,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-05T01:01:28,304 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-05T01:01:28,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742103_1279 (size=217555) 2024-12-05T01:01:28,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-05T01:01:28,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742103_1279 (size=217555) 2024-12-05T01:01:28,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46149 is added to blk_1073742103_1279 (size=217555) 2024-12-05T01:01:28,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742104_1280 (size=4188619) 2024-12-05T01:01:28,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742104_1280 (size=4188619) 2024-12-05T01:01:28,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742104_1280 (size=4188619) 2024-12-05T01:01:28,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742105_1281 (size=127628) 2024-12-05T01:01:28,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742105_1281 (size=127628) 2024-12-05T01:01:28,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742105_1281 (size=127628) 2024-12-05T01:01:28,350 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-05T01:01:28,353 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-05T01:01:28,356 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-05T01:01:28,356 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-05T01:01:28,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742106_1282 (size=441) 2024-12-05T01:01:28,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742106_1282 (size=441) 2024-12-05T01:01:28,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742106_1282 (size=441) 2024-12-05T01:01:28,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742107_1283 (size=21) 2024-12-05T01:01:28,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742107_1283 (size=21) 2024-12-05T01:01:28,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742107_1283 (size=21) 2024-12-05T01:01:28,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742108_1284 (size=304128) 2024-12-05T01:01:28,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742108_1284 (size=304128) 2024-12-05T01:01:28,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742108_1284 (size=304128) 2024-12-05T01:01:28,432 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T01:01:28,433 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T01:01:28,850 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0005_000001 (auth:SIMPLE) from 127.0.0.1:59044 2024-12-05T01:01:33,807 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:01:33,964 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0005_000001 (auth:SIMPLE) from 127.0.0.1:42820 2024-12-05T01:01:34,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742109_1285 (size=349826) 2024-12-05T01:01:34,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742109_1285 (size=349826) 2024-12-05T01:01:34,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742109_1285 (size=349826) 2024-12-05T01:01:36,233 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0005_000001 (auth:SIMPLE) from 127.0.0.1:35978 2024-12-05T01:01:36,232 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0005_000001 (auth:SIMPLE) from 127.0.0.1:60354 2024-12-05T01:01:36,697 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
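[Editor's note] The `ExportSnapshot(763): export split=0 size=8.1 K` / `export split=1 size=5.2 K` lines earlier in this run show the snapshot's hfile list being partitioned into per-mapper groups of roughly equal total size. The sketch below illustrates only the general "assign each file to the currently lightest group" technique, not ExportSnapshot's actual code; the file names and byte counts are made up to mirror the ~8.1 K / ~5.2 K splits in the log.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Illustrative sketch only (not ExportSnapshot's implementation):
// spread files across N mapper groups so group sizes stay roughly even.
public class BalancedSplitsSketch {

  record FileInfo(String name, long length) {}

  static List<List<FileInfo>> balance(List<FileInfo> files, int groups) {
    List<List<FileInfo>> result = new ArrayList<>();
    long[] totals = new long[groups];
    for (int i = 0; i < groups; i++) {
      result.add(new ArrayList<>());
    }
    // Largest files first, then always append to the currently lightest group.
    List<FileInfo> sorted = new ArrayList<>(files);
    sorted.sort(Comparator.comparingLong(FileInfo::length).reversed());
    for (FileInfo f : sorted) {
      int lightest = 0;
      for (int i = 1; i < groups; i++) {
        if (totals[i] < totals[lightest]) {
          lightest = i;
        }
      }
      result.get(lightest).add(f);
      totals[lightest] += f.length();
    }
    return result;
  }

  public static void main(String[] args) {
    // Hypothetical hfile names and sizes, chosen only to echo the two splits above.
    List<FileInfo> hfiles = List.of(
        new FileInfo("hfile-a", 8_258L),
        new FileInfo("hfile-b", 5_354L));
    List<List<FileInfo>> splits = balance(hfiles, 2);
    for (int i = 0; i < splits.size(); i++) {
      long size = splits.get(i).stream().mapToLong(FileInfo::length).sum();
      System.out.println("export split=" + i + " size=" + size + " bytes");
    }
  }
}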
2024-12-05T01:01:40,675 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_2/usercache/jenkins/appcache/application_1733360294895_0005/container_1733360294895_0005_01_000003/launch_container.sh] 2024-12-05T01:01:40,675 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_2/usercache/jenkins/appcache/application_1733360294895_0005/container_1733360294895_0005_01_000003/container_tokens] 2024-12-05T01:01:40,675 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_2/usercache/jenkins/appcache/application_1733360294895_0005/container_1733360294895_0005_01_000003/sysfs] 2024-12-05T01:01:42,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742110_1286 (size=22235) 2024-12-05T01:01:42,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742110_1286 (size=22235) 2024-12-05T01:01:42,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742110_1286 (size=22235) 2024-12-05T01:01:42,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742111_1287 (size=463) 2024-12-05T01:01:42,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742111_1287 (size=463) 2024-12-05T01:01:42,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742111_1287 (size=463) 2024-12-05T01:01:42,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742112_1288 (size=22235) 2024-12-05T01:01:42,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742112_1288 (size=22235) 2024-12-05T01:01:42,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742112_1288 (size=22235) 2024-12-05T01:01:42,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742113_1289 (size=349826) 2024-12-05T01:01:42,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742113_1289 (size=349826) 2024-12-05T01:01:42,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742113_1289 (size=349826) 2024-12-05T01:01:42,222 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_0/usercache/jenkins/appcache/application_1733360294895_0005/container_1733360294895_0005_01_000002/launch_container.sh] 2024-12-05T01:01:42,222 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_0/usercache/jenkins/appcache/application_1733360294895_0005/container_1733360294895_0005_01_000002/container_tokens] 2024-12-05T01:01:42,222 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_0/usercache/jenkins/appcache/application_1733360294895_0005/container_1733360294895_0005_01_000002/sysfs] 2024-12-05T01:01:42,230 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0005_000001 (auth:SIMPLE) from 127.0.0.1:52156 2024-12-05T01:01:43,571 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T01:01:43,572 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-05T01:01:43,574 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-05T01:01:43,574 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T01:01:43,575 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T01:01:43,575 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-05T01:01:43,575 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-05T01:01:43,575 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-05T01:01:43,575 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@5a301e32 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-05T01:01:43,575 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-05T01:01:43,575 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-05T01:01:43,577 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003, srcFsUri=hdfs://localhost:37149, srcDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:01:43,606 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37149, inputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:01:43,606 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@5a301e32, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-05T01:01:43,608 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
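[Editor's note] The `TestExportSnapshot(495)`/`(500)` lines above recursively list both the source snapshot directory on HDFS and the local export target and print every file found (`.snapshotinfo`, `data.manifest`) to confirm the export landed, before the second export of the same snapshot starts. A minimal sketch of that kind of recursive listing with the Hadoop FileSystem API is below; the URI is a placeholder, not one of the paths from this run. (The export itself is normally driven by the documented org.apache.hadoop.hbase.snapshot.ExportSnapshot tool with `-snapshot` and `-copy-to` arguments.)

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

// Minimal sketch: recursively list every file under an exported snapshot directory,
// the same sort of check the test logs as "List files in ... in root ...".
public class ListExportedSnapshot {
  public static void main(String[] args) throws Exception {
    // Placeholder URI; in this log the roots are an hdfs://localhost:... path and a file:/ path.
    URI root = URI.create("file:///tmp/local-export/.hbase-snapshot/snaptb0-example");
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(root, conf);
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(root), true /* recursive */);
    while (it.hasNext()) {
      LocatedFileStatus status = it.next();
      System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
    }
  }
}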
2024-12-05T01:01:43,611 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-05T01:01:43,624 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:43,624 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:43,625 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:44,733 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-1585596545464485310.jar 2024-12-05T01:01:44,733 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:44,734 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:44,813 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-123677179659202377.jar 2024-12-05T01:01:44,814 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:44,814 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:44,815 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:44,815 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:44,815 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:44,816 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:01:44,816 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T01:01:44,816 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T01:01:44,817 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T01:01:44,817 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T01:01:44,817 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T01:01:44,818 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T01:01:44,818 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T01:01:44,818 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T01:01:44,818 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T01:01:44,819 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T01:01:44,819 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T01:01:44,820 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:01:44,820 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:01:44,820 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:01:44,821 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:01:44,821 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:01:44,821 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:01:44,822 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:01:44,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742114_1290 (size=24020) 2024-12-05T01:01:44,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742114_1290 (size=24020) 2024-12-05T01:01:44,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742114_1290 (size=24020) 2024-12-05T01:01:44,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742115_1291 (size=77755) 2024-12-05T01:01:44,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:41667 is added to blk_1073742115_1291 (size=77755) 2024-12-05T01:01:44,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742115_1291 (size=77755) 2024-12-05T01:01:44,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742116_1292 (size=131360) 2024-12-05T01:01:44,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742116_1292 (size=131360) 2024-12-05T01:01:44,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742116_1292 (size=131360) 2024-12-05T01:01:45,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742117_1293 (size=111793) 2024-12-05T01:01:45,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742117_1293 (size=111793) 2024-12-05T01:01:45,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742117_1293 (size=111793) 2024-12-05T01:01:45,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742118_1294 (size=1832290) 2024-12-05T01:01:45,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742118_1294 (size=1832290) 2024-12-05T01:01:45,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742118_1294 (size=1832290) 2024-12-05T01:01:45,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742119_1295 (size=8360282) 2024-12-05T01:01:45,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742119_1295 (size=8360282) 2024-12-05T01:01:45,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742119_1295 (size=8360282) 2024-12-05T01:01:45,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742120_1296 (size=503880) 2024-12-05T01:01:45,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742120_1296 (size=503880) 2024-12-05T01:01:45,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742120_1296 (size=503880) 2024-12-05T01:01:45,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742121_1297 (size=6424744) 2024-12-05T01:01:45,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742121_1297 (size=6424744) 2024-12-05T01:01:45,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742121_1297 (size=6424744) 2024-12-05T01:01:45,245 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742122_1298 (size=322274) 2024-12-05T01:01:45,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742122_1298 (size=322274) 2024-12-05T01:01:45,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742122_1298 (size=322274) 2024-12-05T01:01:45,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742123_1299 (size=20406) 2024-12-05T01:01:45,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742123_1299 (size=20406) 2024-12-05T01:01:45,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742123_1299 (size=20406) 2024-12-05T01:01:45,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742124_1300 (size=443171) 2024-12-05T01:01:45,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742124_1300 (size=443171) 2024-12-05T01:01:45,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742124_1300 (size=443171) 2024-12-05T01:01:45,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742125_1301 (size=45609) 2024-12-05T01:01:45,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742125_1301 (size=45609) 2024-12-05T01:01:45,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742125_1301 (size=45609) 2024-12-05T01:01:45,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742126_1302 (size=136454) 2024-12-05T01:01:45,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742126_1302 (size=136454) 2024-12-05T01:01:45,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742126_1302 (size=136454) 2024-12-05T01:01:45,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742127_1303 (size=1597136) 2024-12-05T01:01:45,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742127_1303 (size=1597136) 2024-12-05T01:01:45,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742127_1303 (size=1597136) 2024-12-05T01:01:45,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742128_1304 (size=30873) 2024-12-05T01:01:45,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742128_1304 (size=30873) 2024-12-05T01:01:45,456 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742128_1304 (size=30873) 2024-12-05T01:01:45,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742129_1305 (size=29229) 2024-12-05T01:01:45,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742129_1305 (size=29229) 2024-12-05T01:01:45,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742129_1305 (size=29229) 2024-12-05T01:01:45,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742130_1306 (size=903858) 2024-12-05T01:01:45,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742130_1306 (size=903858) 2024-12-05T01:01:45,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742130_1306 (size=903858) 2024-12-05T01:01:46,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742131_1307 (size=5175431) 2024-12-05T01:01:46,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742131_1307 (size=5175431) 2024-12-05T01:01:46,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742131_1307 (size=5175431) 2024-12-05T01:01:46,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742132_1308 (size=232881) 2024-12-05T01:01:46,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742132_1308 (size=232881) 2024-12-05T01:01:46,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742132_1308 (size=232881) 2024-12-05T01:01:46,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742133_1309 (size=1323991) 2024-12-05T01:01:46,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742133_1309 (size=1323991) 2024-12-05T01:01:46,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742133_1309 (size=1323991) 2024-12-05T01:01:46,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742134_1310 (size=4695811) 2024-12-05T01:01:46,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742134_1310 (size=4695811) 2024-12-05T01:01:46,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742134_1310 (size=4695811) 2024-12-05T01:01:46,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742135_1311 (size=1877034) 2024-12-05T01:01:46,361 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742135_1311 (size=1877034) 2024-12-05T01:01:46,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742135_1311 (size=1877034) 2024-12-05T01:01:46,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742136_1312 (size=217555) 2024-12-05T01:01:46,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742136_1312 (size=217555) 2024-12-05T01:01:46,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742136_1312 (size=217555) 2024-12-05T01:01:46,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742137_1313 (size=4188619) 2024-12-05T01:01:46,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742137_1313 (size=4188619) 2024-12-05T01:01:46,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742137_1313 (size=4188619) 2024-12-05T01:01:46,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742138_1314 (size=127628) 2024-12-05T01:01:46,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742138_1314 (size=127628) 2024-12-05T01:01:46,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742138_1314 (size=127628) 2024-12-05T01:01:46,657 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
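Note: the WARN just above comes from submitting the ExportSnapshot MapReduce job without an explicit job jar, and the entries that follow show the export of 'snaptb0-testConsecutiveExports' being split and run. A minimal sketch of driving such an export programmatically, assuming the standard Tool entry point; the class name and the destination URI below are illustrative, not the test's real values.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ExportSnapshot launches a MapReduce job; its input splits correspond to the
    // "export split=N size=..." DEBUG lines that appear in the log below.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testConsecutiveExports",
        "--copy-to", "file:///tmp/local-export"   // assumed destination, for illustration only
    });
    System.exit(rc);
  }
}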
2024-12-05T01:01:46,677 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-05T01:01:46,694 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-05T01:01:46,694 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-05T01:01:46,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742139_1315 (size=441) 2024-12-05T01:01:46,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742139_1315 (size=441) 2024-12-05T01:01:46,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742139_1315 (size=441) 2024-12-05T01:01:46,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742140_1316 (size=21) 2024-12-05T01:01:46,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742140_1316 (size=21) 2024-12-05T01:01:46,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742140_1316 (size=21) 2024-12-05T01:01:47,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742141_1317 (size=304126) 2024-12-05T01:01:47,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742141_1317 (size=304126) 2024-12-05T01:01:47,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742141_1317 (size=304126) 2024-12-05T01:01:48,499 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T01:01:48,499 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T01:01:48,536 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0005_000001 (auth:SIMPLE) from 127.0.0.1:47176 2024-12-05T01:01:49,236 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0006_000001 (auth:SIMPLE) from 127.0.0.1:52158 2024-12-05T01:01:53,617 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0005/container_1733360294895_0005_01_000001/launch_container.sh] 2024-12-05T01:01:53,617 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0005/container_1733360294895_0005_01_000001/container_tokens] 2024-12-05T01:01:53,617 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0005/container_1733360294895_0005_01_000001/sysfs] 2024-12-05T01:01:55,195 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0006_000001 (auth:SIMPLE) from 127.0.0.1:57848 2024-12-05T01:01:55,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742142_1318 (size=349824) 2024-12-05T01:01:55,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742142_1318 (size=349824) 2024-12-05T01:01:55,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742142_1318 (size=349824) 2024-12-05T01:01:57,412 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0006_000001 (auth:SIMPLE) from 127.0.0.1:51004 2024-12-05T01:01:57,413 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0006_000001 (auth:SIMPLE) from 127.0.0.1:50476 2024-12-05T01:02:02,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742143_1319 (size=21201) 2024-12-05T01:02:02,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742143_1319 (size=21201) 2024-12-05T01:02:02,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742143_1319 (size=21201) 2024-12-05T01:02:02,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742144_1320 (size=462) 2024-12-05T01:02:02,194 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742144_1320 (size=462) 2024-12-05T01:02:02,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742144_1320 (size=462) 2024-12-05T01:02:02,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742145_1321 (size=21201) 2024-12-05T01:02:02,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742145_1321 (size=21201) 2024-12-05T01:02:02,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742145_1321 (size=21201) 2024-12-05T01:02:02,251 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0006/container_1733360294895_0006_01_000002/launch_container.sh] 2024-12-05T01:02:02,251 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0006/container_1733360294895_0006_01_000002/container_tokens] 2024-12-05T01:02:02,251 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0006/container_1733360294895_0006_01_000002/sysfs] 2024-12-05T01:02:02,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742146_1322 (size=349824) 2024-12-05T01:02:02,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742146_1322 (size=349824) 2024-12-05T01:02:02,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742146_1322 (size=349824) 2024-12-05T01:02:02,270 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0006_000001 (auth:SIMPLE) from 127.0.0.1:35974 2024-12-05T01:02:02,277 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0006_000001 (auth:SIMPLE) from 127.0.0.1:60610 2024-12-05T01:02:03,486 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T01:02:03,486 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-12-05T01:02:03,490 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-05T01:02:03,490 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T01:02:03,491 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T01:02:03,491 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-05T01:02:03,492 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-05T01:02:03,492 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-05T01:02:03,492 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@5a301e32 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-05T01:02:03,492 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-05T01:02:03,492 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360485003/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-05T01:02:03,508 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-05T01:02:03,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-05T01:02:03,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-05T01:02:03,511 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360523511"}]},"ts":"1733360523511"} 2024-12-05T01:02:03,513 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-05T01:02:03,514 INFO [PEWorker-3 {}] 
procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-05T01:02:03,514 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-05T01:02:03,516 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7d542f0249a2e2b0d1794004da037d9b, UNASSIGN}, {pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e5bf89c1ca44706b370e34cb6fcd5140, UNASSIGN}] 2024-12-05T01:02:03,517 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7d542f0249a2e2b0d1794004da037d9b, UNASSIGN 2024-12-05T01:02:03,517 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e5bf89c1ca44706b370e34cb6fcd5140, UNASSIGN 2024-12-05T01:02:03,517 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=7d542f0249a2e2b0d1794004da037d9b, regionState=CLOSING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:02:03,517 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=e5bf89c1ca44706b370e34cb6fcd5140, regionState=CLOSING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:02:03,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7d542f0249a2e2b0d1794004da037d9b, UNASSIGN because future has completed 2024-12-05T01:02:03,519 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:02:03,519 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7d542f0249a2e2b0d1794004da037d9b, server=2113c16e5528,41191,1733360288910}] 2024-12-05T01:02:03,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e5bf89c1ca44706b370e34cb6fcd5140, UNASSIGN because future has completed 2024-12-05T01:02:03,520 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:02:03,520 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure e5bf89c1ca44706b370e34cb6fcd5140, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:02:03,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-05T01:02:03,672 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:02:03,672 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:02:03,672 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 7d542f0249a2e2b0d1794004da037d9b, disabling compactions & flushes 2024-12-05T01:02:03,672 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. 2024-12-05T01:02:03,672 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. 2024-12-05T01:02:03,672 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. after waiting 0 ms 2024-12-05T01:02:03,672 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. 2024-12-05T01:02:03,673 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(122): Close e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:02:03,673 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:02:03,673 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1722): Closing e5bf89c1ca44706b370e34cb6fcd5140, disabling compactions & flushes 2024-12-05T01:02:03,673 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. 2024-12-05T01:02:03,673 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. 2024-12-05T01:02:03,673 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. after waiting 0 ms 2024-12-05T01:02:03,673 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. 
2024-12-05T01:02:03,678 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:02:03,678 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:02:03,684 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:02:03,684 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:02:03,684 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b. 2024-12-05T01:02:03,684 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140. 2024-12-05T01:02:03,684 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 7d542f0249a2e2b0d1794004da037d9b: Waiting for close lock at 1733360523672Running coprocessor pre-close hooks at 1733360523672Disabling compacts and flushes for region at 1733360523672Disabling writes for close at 1733360523672Writing region close event to WAL at 1733360523673 (+1 ms)Running coprocessor post-close hooks at 1733360523684 (+11 ms)Closed at 1733360523684 2024-12-05T01:02:03,684 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1676): Region close journal for e5bf89c1ca44706b370e34cb6fcd5140: Waiting for close lock at 1733360523673Running coprocessor pre-close hooks at 1733360523673Disabling compacts and flushes for region at 1733360523673Disabling writes for close at 1733360523673Writing region close event to WAL at 1733360523674 (+1 ms)Running coprocessor post-close hooks at 1733360523684 (+10 ms)Closed at 1733360523684 2024-12-05T01:02:03,686 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:02:03,686 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=7d542f0249a2e2b0d1794004da037d9b, regionState=CLOSED 2024-12-05T01:02:03,687 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(157): Closed e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:02:03,687 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=e5bf89c1ca44706b370e34cb6fcd5140, regionState=CLOSED 2024-12-05T01:02:03,688 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): 
Going to wake up procedure pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7d542f0249a2e2b0d1794004da037d9b, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T01:02:03,689 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure e5bf89c1ca44706b370e34cb6fcd5140, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:02:03,691 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=127 2024-12-05T01:02:03,691 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure 7d542f0249a2e2b0d1794004da037d9b, server=2113c16e5528,41191,1733360288910 in 170 msec 2024-12-05T01:02:03,691 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=130, resume processing ppid=128 2024-12-05T01:02:03,691 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=128, state=SUCCESS, hasLock=false; CloseRegionProcedure e5bf89c1ca44706b370e34cb6fcd5140, server=2113c16e5528,32927,1733360288712 in 170 msec 2024-12-05T01:02:03,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7d542f0249a2e2b0d1794004da037d9b, UNASSIGN in 175 msec 2024-12-05T01:02:03,693 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=128, resume processing ppid=126 2024-12-05T01:02:03,693 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e5bf89c1ca44706b370e34cb6fcd5140, UNASSIGN in 175 msec 2024-12-05T01:02:03,695 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-12-05T01:02:03,695 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 179 msec 2024-12-05T01:02:03,696 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360523696"}]},"ts":"1733360523696"} 2024-12-05T01:02:03,697 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-05T01:02:03,697 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-12-05T01:02:03,699 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 190 msec 2024-12-05T01:02:03,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-05T01:02:03,832 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-05T01:02:03,833 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$5(2570): 
Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-05T01:02:03,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-05T01:02:03,835 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-05T01:02:03,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-05T01:02:03,836 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=131, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-05T01:02:03,839 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-05T01:02:03,841 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:02:03,841 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:02:03,843 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140/recovered.edits] 2024-12-05T01:02:03,843 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b/recovered.edits] 2024-12-05T01:02:03,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-05T01:02:03,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-05T01:02:03,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-05T01:02:03,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-05T01:02:03,845 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-05T01:02:03,845 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-05T01:02:03,845 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-05T01:02:03,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-05T01:02:03,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-05T01:02:03,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:03,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-05T01:02:03,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:03,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:03,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:03,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-12-05T01:02:03,848 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data null 2024-12-05T01:02:03,848 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-05T01:02:03,849 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140/cf/b31e97dc1ffe4492b17d12d48b0251fa to 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140/cf/b31e97dc1ffe4492b17d12d48b0251fa 2024-12-05T01:02:03,850 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b/cf/8bb6514d307e417b9c4ce86954718580 to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b/cf/8bb6514d307e417b9c4ce86954718580 2024-12-05T01:02:03,853 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140/recovered.edits/9.seqid 2024-12-05T01:02:03,854 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/e5bf89c1ca44706b370e34cb6fcd5140 2024-12-05T01:02:03,854 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b/recovered.edits/9.seqid 2024-12-05T01:02:03,855 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testConsecutiveExports/7d542f0249a2e2b0d1794004da037d9b 2024-12-05T01:02:03,855 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-05T01:02:03,857 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=131, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-05T01:02:03,860 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-05T01:02:03,863 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-05T01:02:03,864 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=131, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-05T01:02:03,864 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 
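Note: the sequence above (DisableTableProcedure with region UNASSIGN/close, HFile archiving, then DeleteTableProcedure), followed a few entries below by the snapshot deletions, is what ordinary Admin calls produce. A minimal client-side sketch, assuming a reachable cluster; the class name and connection setup are illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CleanupSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testConsecutiveExports");
      admin.disableTable(tn);   // drives the DisableTableProcedure / region close steps logged above
      admin.deleteTable(tn);    // drives the DeleteTableProcedure and HFile archiving
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}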
2024-12-05T01:02:03,864 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360523864"}]},"ts":"9223372036854775807"} 2024-12-05T01:02:03,864 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360523864"}]},"ts":"9223372036854775807"} 2024-12-05T01:02:03,867 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T01:02:03,867 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 7d542f0249a2e2b0d1794004da037d9b, NAME => 'testtb-testConsecutiveExports,,1733360482417.7d542f0249a2e2b0d1794004da037d9b.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => e5bf89c1ca44706b370e34cb6fcd5140, NAME => 'testtb-testConsecutiveExports,1,1733360482417.e5bf89c1ca44706b370e34cb6fcd5140.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T01:02:03,867 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 2024-12-05T01:02:03,867 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733360523867"}]},"ts":"9223372036854775807"} 2024-12-05T01:02:03,869 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-12-05T01:02:03,870 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=131, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-05T01:02:03,871 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 37 msec 2024-12-05T01:02:03,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-12-05T01:02:03,952 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-12-05T01:02:03,952 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-05T01:02:03,961 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-12-05T01:02:03,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-05T01:02:03,965 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-12-05T01:02:03,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-05T01:02:03,997 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=801 (was 
801), OpenFileDescriptor=799 (was 803), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=683 (was 710), ProcessCount=17 (was 24), AvailableMemoryMB=5409 (was 5594) 2024-12-05T01:02:03,998 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-12-05T01:02:04,021 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=801, OpenFileDescriptor=799, MaxFileDescriptor=1048576, SystemLoadAverage=683, ProcessCount=17, AvailableMemoryMB=5407 2024-12-05T01:02:04,021 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-12-05T01:02:04,023 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T01:02:04,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:04,025 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T01:02:04,025 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:04,025 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 132 2024-12-05T01:02:04,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-05T01:02:04,030 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T01:02:04,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742147_1323 (size=422) 2024-12-05T01:02:04,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742147_1323 (size=422) 2024-12-05T01:02:04,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742147_1323 (size=422) 2024-12-05T01:02:04,048 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0b389d15c67c422ab446672d19f8a327, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327.', STARTKEY => '', 
ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:04,049 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 32b2bae45319cdf8a32d24c50dad228f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:04,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742148_1324 (size=83) 2024-12-05T01:02:04,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742148_1324 (size=83) 2024-12-05T01:02:04,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742148_1324 (size=83) 2024-12-05T01:02:04,076 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:04,076 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 0b389d15c67c422ab446672d19f8a327, disabling compactions & flushes 2024-12-05T01:02:04,076 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 2024-12-05T01:02:04,076 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 2024-12-05T01:02:04,076 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 
after waiting 0 ms 2024-12-05T01:02:04,076 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 2024-12-05T01:02:04,076 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 2024-12-05T01:02:04,076 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0b389d15c67c422ab446672d19f8a327: Waiting for close lock at 1733360524076Disabling compacts and flushes for region at 1733360524076Disabling writes for close at 1733360524076Writing region close event to WAL at 1733360524076Closed at 1733360524076 2024-12-05T01:02:04,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742149_1325 (size=83) 2024-12-05T01:02:04,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742149_1325 (size=83) 2024-12-05T01:02:04,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742149_1325 (size=83) 2024-12-05T01:02:04,087 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:04,087 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing 32b2bae45319cdf8a32d24c50dad228f, disabling compactions & flushes 2024-12-05T01:02:04,087 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. 2024-12-05T01:02:04,087 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. 2024-12-05T01:02:04,087 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. after waiting 0 ms 2024-12-05T01:02:04,087 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. 2024-12-05T01:02:04,087 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. 
2024-12-05T01:02:04,087 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 32b2bae45319cdf8a32d24c50dad228f: Waiting for close lock at 1733360524087Disabling compacts and flushes for region at 1733360524087Disabling writes for close at 1733360524087Writing region close event to WAL at 1733360524087Closed at 1733360524087 2024-12-05T01:02:04,089 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T01:02:04,089 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733360524089"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360524089"}]},"ts":"1733360524089"} 2024-12-05T01:02:04,089 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733360524089"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360524089"}]},"ts":"1733360524089"} 2024-12-05T01:02:04,092 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T01:02:04,093 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T01:02:04,093 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360524093"}]},"ts":"1733360524093"} 2024-12-05T01:02:04,095 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-05T01:02:04,095 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {2113c16e5528=0} racks are {/default-rack=0} 2024-12-05T01:02:04,096 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T01:02:04,097 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T01:02:04,097 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T01:02:04,097 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T01:02:04,097 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T01:02:04,097 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T01:02:04,097 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T01:02:04,097 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T01:02:04,097 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T01:02:04,097 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T01:02:04,097 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0b389d15c67c422ab446672d19f8a327, ASSIGN}, {pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=32b2bae45319cdf8a32d24c50dad228f, ASSIGN}] 2024-12-05T01:02:04,098 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0b389d15c67c422ab446672d19f8a327, ASSIGN 2024-12-05T01:02:04,099 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=32b2bae45319cdf8a32d24c50dad228f, ASSIGN 2024-12-05T01:02:04,099 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0b389d15c67c422ab446672d19f8a327, ASSIGN; state=OFFLINE, location=2113c16e5528,44435,1733360288855; forceNewPlan=false, retain=false 2024-12-05T01:02:04,100 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=32b2bae45319cdf8a32d24c50dad228f, ASSIGN; state=OFFLINE, location=2113c16e5528,41191,1733360288910; forceNewPlan=false, retain=false 2024-12-05T01:02:04,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-05T01:02:04,250 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
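Note: the CreateTableProcedure above writes the filesystem layout, adds the two regions to hbase:meta, and hands them to the balancer for assignment. A minimal sketch of the client call that yields a table like this, reproducing only the split key '1' and the 'cf' family with VERSIONS => '1' from the logged descriptor; other descriptor attributes are left at defaults, and the class name is illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)          // matches VERSIONS => '1' in the logged descriptor
              .build());
      // One split key, "1", yields the two regions ('' -> '1' and '1' -> '') assigned above.
      admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}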
2024-12-05T01:02:04,251 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=32b2bae45319cdf8a32d24c50dad228f, regionState=OPENING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:02:04,251 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=0b389d15c67c422ab446672d19f8a327, regionState=OPENING, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:02:04,256 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0b389d15c67c422ab446672d19f8a327, ASSIGN because future has completed 2024-12-05T01:02:04,257 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0b389d15c67c422ab446672d19f8a327, server=2113c16e5528,44435,1733360288855}] 2024-12-05T01:02:04,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=32b2bae45319cdf8a32d24c50dad228f, ASSIGN because future has completed 2024-12-05T01:02:04,258 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=136, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 32b2bae45319cdf8a32d24c50dad228f, server=2113c16e5528,41191,1733360288910}] 2024-12-05T01:02:04,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-05T01:02:04,414 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. 2024-12-05T01:02:04,414 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 2024-12-05T01:02:04,414 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => 0b389d15c67c422ab446672d19f8a327, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T01:02:04,414 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7752): Opening region: {ENCODED => 32b2bae45319cdf8a32d24c50dad228f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T01:02:04,414 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 
service=AccessControlService 2024-12-05T01:02:04,414 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. service=AccessControlService 2024-12-05T01:02:04,414 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T01:02:04,414 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T01:02:04,415 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:04,415 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:04,415 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:04,415 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:04,415 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7794): checking encryption for 32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:04,415 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for 0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:04,415 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7797): checking classloading for 32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:04,415 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for 0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:04,416 INFO [StoreOpener-32b2bae45319cdf8a32d24c50dad228f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:04,416 INFO [StoreOpener-0b389d15c67c422ab446672d19f8a327-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:04,417 INFO [StoreOpener-32b2bae45319cdf8a32d24c50dad228f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 32b2bae45319cdf8a32d24c50dad228f columnFamilyName cf 2024-12-05T01:02:04,417 INFO [StoreOpener-0b389d15c67c422ab446672d19f8a327-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0b389d15c67c422ab446672d19f8a327 columnFamilyName cf 2024-12-05T01:02:04,417 DEBUG [StoreOpener-0b389d15c67c422ab446672d19f8a327-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:04,417 DEBUG [StoreOpener-32b2bae45319cdf8a32d24c50dad228f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:04,418 INFO [StoreOpener-32b2bae45319cdf8a32d24c50dad228f-1 {}] regionserver.HStore(327): Store=32b2bae45319cdf8a32d24c50dad228f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:02:04,418 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1038): replaying wal for 32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:04,418 INFO [StoreOpener-0b389d15c67c422ab446672d19f8a327-1 {}] regionserver.HStore(327): Store=0b389d15c67c422ab446672d19f8a327/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:02:04,418 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for 0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:04,419 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:04,419 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:04,419 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:04,419 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:04,419 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for 0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:04,419 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1048): stopping wal replay for 32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:04,419 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for 0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:04,419 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1060): Cleaning up temporary data for 32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:04,421 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for 0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:04,421 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1093): writing seq id for 32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:04,423 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:02:04,423 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened 0b389d15c67c422ab446672d19f8a327; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72609565, jitterRate=0.081966832280159}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:02:04,423 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:04,424 DEBUG 
[RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for 0b389d15c67c422ab446672d19f8a327: Running coprocessor pre-open hook at 1733360524415Writing region info on filesystem at 1733360524415Initializing all the Stores at 1733360524416 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360524416Cleaning up temporary data from old regions at 1733360524419 (+3 ms)Running coprocessor post-open hooks at 1733360524423 (+4 ms)Region opened successfully at 1733360524424 (+1 ms) 2024-12-05T01:02:04,425 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327., pid=135, masterSystemTime=1733360524410 2024-12-05T01:02:04,427 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 2024-12-05T01:02:04,427 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 2024-12-05T01:02:04,427 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=0b389d15c67c422ab446672d19f8a327, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:02:04,429 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:02:04,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0b389d15c67c422ab446672d19f8a327, server=2113c16e5528,44435,1733360288855 because future has completed 2024-12-05T01:02:04,429 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1114): Opened 32b2bae45319cdf8a32d24c50dad228f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75363130, jitterRate=0.12299814820289612}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:02:04,429 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:04,430 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1006): Region open journal for 32b2bae45319cdf8a32d24c50dad228f: Running coprocessor pre-open hook at 1733360524415Writing 
region info on filesystem at 1733360524415Initializing all the Stores at 1733360524416 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360524416Cleaning up temporary data from old regions at 1733360524419 (+3 ms)Running coprocessor post-open hooks at 1733360524429 (+10 ms)Region opened successfully at 1733360524430 (+1 ms) 2024-12-05T01:02:04,430 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f., pid=136, masterSystemTime=1733360524411 2024-12-05T01:02:04,432 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. 2024-12-05T01:02:04,432 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. 2024-12-05T01:02:04,432 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=133 2024-12-05T01:02:04,432 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 0b389d15c67c422ab446672d19f8a327, server=2113c16e5528,44435,1733360288855 in 173 msec 2024-12-05T01:02:04,432 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=32b2bae45319cdf8a32d24c50dad228f, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:02:04,434 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0b389d15c67c422ab446672d19f8a327, ASSIGN in 335 msec 2024-12-05T01:02:04,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 32b2bae45319cdf8a32d24c50dad228f, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T01:02:04,437 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=136, resume processing ppid=134 2024-12-05T01:02:04,437 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=134, state=SUCCESS, hasLock=false; OpenRegionProcedure 32b2bae45319cdf8a32d24c50dad228f, server=2113c16e5528,41191,1733360288910 in 177 msec 2024-12-05T01:02:04,438 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=132 2024-12-05T01:02:04,438 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=32b2bae45319cdf8a32d24c50dad228f, ASSIGN in 340 msec 2024-12-05T01:02:04,440 INFO [PEWorker-4 {}] 
procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T01:02:04,440 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360524440"}]},"ts":"1733360524440"} 2024-12-05T01:02:04,442 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-05T01:02:04,443 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T01:02:04,443 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-05T01:02:04,447 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-05T01:02:04,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:04,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:04,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:04,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:04,453 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:04,453 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:04,454 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:04,454 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data 
PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:04,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 430 msec 2024-12-05T01:02:04,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-05T01:02:04,662 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-05T01:02:04,662 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. Timeout = 60000ms 2024-12-05T01:02:04,662 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:02:04,666 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-12-05T01:02:04,667 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:02:04,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 2024-12-05T01:02:04,667 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-05T01:02:04,670 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-05T01:02:04,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360524670 (current time:1733360524670). 
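The "snapshot request for:{ ss=emptySnaptb0-... type=FLUSH ttl=0 }" line above is the master receiving a flush-type snapshot request for the freshly created (still empty) table. A sketch of the equivalent client call; only the snapshot and table names are taken from the log, everything else is assumed.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Requests a FLUSH-type snapshot, matching the SnapshotDescriptionUtils lines
      // above (TTL 0, VERSION defaulted to 2, owner set to the calling user).
      // The call blocks until the master-side SnapshotProcedure completes; the
      // recurring "Checking to see if procedure is done" DEBUG lines are that polling.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    }
  }
}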
2024-12-05T01:02:04,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:02:04,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-05T01:02:04,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:02:04,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c19ca75, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:04,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:04,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:04,672 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:04,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:04,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:04,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c2d2b87, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:04,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:04,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:04,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:04,673 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52280, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:04,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75f73765, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:04,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:04,675 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:04,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:04,676 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45812, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:02:04,677 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 2024-12-05T01:02:04,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:04,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:04,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:04,677 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T01:02:04,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35700db4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:04,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:04,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:04,679 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:04,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:04,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:04,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f5140c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:04,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:04,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:04,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:04,680 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52298, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:04,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bc21dfc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:04,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:04,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:04,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:04,683 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45816, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-05T01:02:04,684 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:02:04,685 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 2024-12-05T01:02:04,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:04,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:04,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:04,685 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
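The "Writing permission with rowKey ... jenkins: RWXCA" and "Read acl" lines come from the AccessController coprocessor: creating the table automatically grants the creator full rights, and snapshot validation reads those table permissions back (the PermissionStorage.getTablePermissions frames in the call stack above) so they can be stored with the snapshot description. An explicit grant and read-back of the same RWXCA set through the client API would look roughly like the sketch below; it is illustrative only and not the test's own code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class AclSketch {
  public static void main(String[] args) throws Throwable {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Grant the full RWXCA set seen in the "Read acl: ... kv [jenkins: RWXCA]" lines;
      // null family/qualifier means the grant applies to the whole table.
      AccessControlClient.grant(conn, table, "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
      // Read back the table permissions that writeAclToSnapshotDescription() consults.
      for (UserPermission up : AccessControlClient.getUserPermissions(conn, table.getNameAsString())) {
        System.out.println(up);
      }
    }
  }
}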
2024-12-05T01:02:04,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-05T01:02:04,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T01:02:04,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-05T01:02:04,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-05T01:02:04,688 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:02:04,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-05T01:02:04,689 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:02:04,691 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:02:04,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742150_1326 (size=215) 2024-12-05T01:02:04,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742150_1326 (size=215) 2024-12-05T01:02:04,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742150_1326 (size=215) 2024-12-05T01:02:04,697 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:02:04,697 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
0b389d15c67c422ab446672d19f8a327}, {pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 32b2bae45319cdf8a32d24c50dad228f}] 2024-12-05T01:02:04,698 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:04,698 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:04,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-05T01:02:04,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44435 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-12-05T01:02:04,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41191 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=139 2024-12-05T01:02:04,850 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 2024-12-05T01:02:04,850 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. 2024-12-05T01:02:04,850 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.HRegion(2603): Flush status journal for 32b2bae45319cdf8a32d24c50dad228f: 2024-12-05T01:02:04,850 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 0b389d15c67c422ab446672d19f8a327: 2024-12-05T01:02:04,850 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-05T01:02:04,850 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-05T01:02:04,850 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:04,850 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:04,850 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:04,850 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:04,850 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:02:04,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:02:04,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742151_1327 (size=86) 2024-12-05T01:02:04,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742151_1327 (size=86) 2024-12-05T01:02:04,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742152_1328 (size=86) 2024-12-05T01:02:04,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742152_1328 (size=86) 2024-12-05T01:02:04,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742151_1327 (size=86) 2024-12-05T01:02:04,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742152_1328 (size=86) 2024-12-05T01:02:04,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 2024-12-05T01:02:04,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. 
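At this point each regionserver has stored the region-info manifest for its region with an empty hfile list (the "Adding snapshot references for [] hfiles" lines, since nothing has been flushed yet) and is closing its snapshot operation. The blocking Admin.snapshot call shown earlier polls the master procedure internally; a client that starts the snapshot asynchronously can poll completion itself. A sketch, assuming the client-side SnapshotDescription/isSnapshotFinished API:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class SnapshotPollSketch {
  public static void main(String[] args) throws Exception {
    SnapshotDescription snap = new SnapshotDescription(
        "emptySnaptb0-testExportFileSystemStateWithMergeRegion",
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Poll until the master reports the snapshot procedure (pid=137 here) as done.
      while (!admin.isSnapshotFinished(snap)) {
        Thread.sleep(200);
      }
    }
  }
}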
2024-12-05T01:02:04,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-05T01:02:04,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-12-05T01:02:04,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=139 2024-12-05T01:02:04,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-12-05T01:02:04,859 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:04,859 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:04,859 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:04,859 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:04,861 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0b389d15c67c422ab446672d19f8a327 in 163 msec 2024-12-05T01:02:04,862 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=139, resume processing ppid=137 2024-12-05T01:02:04,862 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:02:04,862 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 32b2bae45319cdf8a32d24c50dad228f in 163 msec 2024-12-05T01:02:04,863 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:02:04,863 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:02:04,863 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for 
emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:04,864 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:04,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742153_1329 (size=597) 2024-12-05T01:02:04,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742153_1329 (size=597) 2024-12-05T01:02:04,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742153_1329 (size=597) 2024-12-05T01:02:04,875 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:02:04,878 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:02:04,879 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:04,880 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:02:04,880 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-05T01:02:04,881 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 194 msec 2024-12-05T01:02:05,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-05T01:02:05,002 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-05T01:02:05,006 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] 
client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='0a6ed19bfb0a7db714a6360dd5fc97837', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327., hostname=2113c16e5528,44435,1733360288855, seqNum=2] 2024-12-05T01:02:05,007 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='15192db491684b2220418d6e4bbf189e8', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:02:05,008 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='2ec85cbb84cbba682260427a2c0f2074c', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:02:05,009 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='3e22e6e4d9b243aafb64ad1ea3bc08e10', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:02:05,009 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='43acd299e17bbdbc9b8dd91095dc936db', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:02:05,012 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44435 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:02:05,014 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41191 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:02:05,015 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-05T01:02:05,017 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:05,017 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 
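The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash" warnings are emitted when puts arrive with SKIP_WAL durability, which is how the test loads rows quickly before taking the second snapshot. A sketch of such a put; the row key, qualifier, and value are placeholders, and only the table and family names come from the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(tn)) {
      // Placeholder row; the test uses randomized keys like the '0a6ed19b...' and
      // '15192db4...' rows resolved by AsyncNonMetaRegionLocator above.
      Put put = new Put(Bytes.toBytes("example-row"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL is what triggers the regionserver warning about possible data loss.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}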
2024-12-05T01:02:05,017 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:02:05,018 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-05T01:02:05,022 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-05T01:02:05,027 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-05T01:02:05,029 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-05T01:02:05,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360525029 (current time:1733360525029). 2024-12-05T01:02:05,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:02:05,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-05T01:02:05,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:02:05,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@471aef00, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:05,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:05,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:05,031 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:05,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:05,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:05,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@626b6716, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:05,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:05,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:05,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:05,032 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52308, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:05,033 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59a1ad64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:05,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:05,033 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:05,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:05,034 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45818, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:02:05,035 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 
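
The "snapshot request for:{ ss=snaptb0-... type=FLUSH ttl=0 }" handling above (defaulting the creation time, TTL, version and owner, then running the security checks) is the master side of a single client Admin call. A hedged sketch of that call, assuming a reachable cluster on the default configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // For an enabled table this takes a flush-type snapshot and blocks until the
          // master-side SnapshotProcedure completes (the "Checking to see if procedure
          // is done" polling seen in the log).
          admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion",
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
        }
      }
    }
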
2024-12-05T01:02:05,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:05,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:05,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:05,035 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:02:05,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f71953c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:05,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:05,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:05,037 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:05,037 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:05,037 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:05,037 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40b83369, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:05,037 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:05,037 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:05,037 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:05,038 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52330, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:05,038 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@779f934d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:05,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:05,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:05,040 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:05,040 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45832, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:02:05,041 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:02:05,042 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 
2024-12-05T01:02:05,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:05,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:05,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:05,043 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:02:05,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-05T01:02:05,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
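
The "Read acl: entry[testtb-...], kv [jenkins: RWXCA]" line shows the master reading the table's permissions from hbase:acl so they can be carried inside the snapshot description. The same stored permissions can be inspected from a client; a sketch using AccessControlClient (the table-regex overload is assumed to be the simplest fit here, and the printed format is illustrative):

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class TableAclSketch {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Consults the same hbase:acl storage that PermissionStorage reads above.
          List<UserPermission> perms = AccessControlClient.getUserPermissions(
              conn, "testtb-testExportFileSystemStateWithMergeRegion");
          perms.forEach(System.out::println); // expect an entry like "jenkins: RWXCA"
        }
      }
    }
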
2024-12-05T01:02:05,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-05T01:02:05,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-05T01:02:05,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-05T01:02:05,045 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:02:05,046 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:02:05,048 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:02:05,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742154_1330 (size=210) 2024-12-05T01:02:05,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742154_1330 (size=210) 2024-12-05T01:02:05,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742154_1330 (size=210) 2024-12-05T01:02:05,054 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:02:05,054 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0b389d15c67c422ab446672d19f8a327}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 32b2bae45319cdf8a32d24c50dad228f}] 2024-12-05T01:02:05,055 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:05,055 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:05,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-05T01:02:05,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44435 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-12-05T01:02:05,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41191 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-05T01:02:05,207 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. 2024-12-05T01:02:05,207 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 2024-12-05T01:02:05,207 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 0b389d15c67c422ab446672d19f8a327 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-05T01:02:05,207 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2902): Flushing 32b2bae45319cdf8a32d24c50dad228f 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-05T01:02:05,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327/.tmp/cf/963a1afc8f844490ad1c9e958efd44b1 is 71, key is 025167f4009d02c180ae377505a31248/cf:q/1733360525012/Put/seqid=0 2024-12-05T01:02:05,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f/.tmp/cf/a976300f918444aaa149e735b14665af is 71, key is 1d235c22e6373160d5a4719f5f90be24/cf:q/1733360525014/Put/seqid=0 2024-12-05T01:02:05,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742155_1331 (size=5354) 2024-12-05T01:02:05,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742155_1331 (size=5354) 2024-12-05T01:02:05,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742156_1332 (size=8258) 2024-12-05T01:02:05,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742156_1332 (size=8258) 
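
The two SnapshotRegionProcedure sub-procedures (pid=141 and pid=142) each flush their region's memstore before referencing its HFiles, which is what the "Flushing ... 1/1 column families" and DefaultStoreFlusher lines record. An explicit client-requested flush ends in broadly the same per-region memstore flush; a brief sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ExplicitFlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Forces memstore contents into HFiles for every region of the table,
          // similar to what the FLUSH-type snapshot does region by region above.
          admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
        }
      }
    }
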
2024-12-05T01:02:05,230 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327/.tmp/cf/963a1afc8f844490ad1c9e958efd44b1 2024-12-05T01:02:05,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742155_1331 (size=5354) 2024-12-05T01:02:05,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742156_1332 (size=8258) 2024-12-05T01:02:05,231 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f/.tmp/cf/a976300f918444aaa149e735b14665af 2024-12-05T01:02:05,236 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327/.tmp/cf/963a1afc8f844490ad1c9e958efd44b1 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327/cf/963a1afc8f844490ad1c9e958efd44b1 2024-12-05T01:02:05,236 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f/.tmp/cf/a976300f918444aaa149e735b14665af as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f/cf/a976300f918444aaa149e735b14665af 2024-12-05T01:02:05,240 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327/cf/963a1afc8f844490ad1c9e958efd44b1, entries=4, sequenceid=6, filesize=5.2 K 2024-12-05T01:02:05,241 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f/cf/a976300f918444aaa149e735b14665af, entries=46, sequenceid=6, filesize=8.1 K 2024-12-05T01:02:05,241 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, 
currentSize=0 B/0 for 0b389d15c67c422ab446672d19f8a327 in 34ms, sequenceid=6, compaction requested=false 2024-12-05T01:02:05,241 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-05T01:02:05,241 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 0b389d15c67c422ab446672d19f8a327: 2024-12-05T01:02:05,241 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-05T01:02:05,241 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 32b2bae45319cdf8a32d24c50dad228f in 34ms, sequenceid=6, compaction requested=false 2024-12-05T01:02:05,242 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for 32b2bae45319cdf8a32d24c50dad228f: 2024-12-05T01:02:05,242 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:05,242 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-05T01:02:05,242 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:05,242 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327/cf/963a1afc8f844490ad1c9e958efd44b1] hfiles 2024-12-05T01:02:05,242 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327/cf/963a1afc8f844490ad1c9e958efd44b1 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:05,242 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:05,242 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:05,242 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f/cf/a976300f918444aaa149e735b14665af] hfiles 2024-12-05T01:02:05,242 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f/cf/a976300f918444aaa149e735b14665af for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:05,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742157_1333 (size=125) 2024-12-05T01:02:05,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742158_1334 (size=125) 2024-12-05T01:02:05,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742158_1334 (size=125) 2024-12-05T01:02:05,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742157_1333 (size=125) 2024-12-05T01:02:05,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742157_1333 (size=125) 2024-12-05T01:02:05,248 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. 2024-12-05T01:02:05,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742158_1334 (size=125) 2024-12-05T01:02:05,248 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-05T01:02:05,249 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 
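
At this point each region callable has stored its region-info and one HFile reference in the snapshot manifest. Once the parent procedure finishes, the snapshot becomes visible to clients; a sketch of listing it (output format is illustrative):

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshotsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          List<SnapshotDescription> snapshots = admin.listSnapshots();
          for (SnapshotDescription s : snapshots) {
            // Expect emptySnaptb0-... and snaptb0-testExportFileSystemStateWithMergeRegion.
            System.out.println(s.getName());
          }
        }
      }
    }
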
2024-12-05T01:02:05,249 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-05T01:02:05,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-12-05T01:02:05,249 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:05,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-12-05T01:02:05,249 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:05,249 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:05,249 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:05,251 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0b389d15c67c422ab446672d19f8a327 in 196 msec 2024-12-05T01:02:05,252 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=142, resume processing ppid=140 2024-12-05T01:02:05,252 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 32b2bae45319cdf8a32d24c50dad228f in 196 msec 2024-12-05T01:02:05,252 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:02:05,252 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:02:05,253 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:02:05,253 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:05,254 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:05,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742159_1335 (size=675) 2024-12-05T01:02:05,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742159_1335 (size=675) 2024-12-05T01:02:05,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742159_1335 (size=675) 2024-12-05T01:02:05,263 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:02:05,266 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:02:05,267 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:05,268 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:02:05,268 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-05T01:02:05,269 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 225 msec 2024-12-05T01:02:05,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-05T01:02:05,362 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-05T01:02:05,364 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45834, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T01:02:05,364 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52460, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T01:02:05,364 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58376, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T01:02:05,366 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T01:02:05,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:05,367 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T01:02:05,368 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:05,368 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 143 2024-12-05T01:02:05,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-05T01:02:05,368 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T01:02:05,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742160_1336 (size=399) 2024-12-05T01:02:05,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742160_1336 (size=399) 2024-12-05T01:02:05,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742160_1336 (size=399) 2024-12-05T01:02:05,377 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1b1d465c05c6bf6dd4bc7a7c22203ce4, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:05,377 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 84caee47b3599da908b2901f15df81e7, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:05,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742161_1337 (size=85) 2024-12-05T01:02:05,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742161_1337 (size=85) 2024-12-05T01:02:05,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742161_1337 (size=85) 2024-12-05T01:02:05,386 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:05,386 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing 1b1d465c05c6bf6dd4bc7a7c22203ce4, disabling compactions & flushes 2024-12-05T01:02:05,386 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4. 2024-12-05T01:02:05,386 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4. 2024-12-05T01:02:05,386 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4. after waiting 0 ms 2024-12-05T01:02:05,386 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4. 
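
The create-table request above carries the full schema for 'testtb-testExportFileSystemStateWithMergeRegion-1' (a single 'cf' family, one version, ROW bloom filter, 64 KB blocks) and a split at key '2', which is why two regions (1b1d465c... and 84caee47...) are written to the FS layout and initialized. A hedged sketch of building an equivalent request from a client, with values copied from the logged descriptor and error handling omitted:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateSplitTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .setBloomFilterType(BloomType.ROW)
                  .setBlocksize(64 * 1024)
                  .build())
              .build();
          // One split key yields the two regions seen above: ('', '2') and ('2', '').
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("2") };
          admin.createTable(desc, splitKeys);
        }
      }
    }
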
2024-12-05T01:02:05,386 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4. 2024-12-05T01:02:05,387 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1b1d465c05c6bf6dd4bc7a7c22203ce4: Waiting for close lock at 1733360525386Disabling compacts and flushes for region at 1733360525386Disabling writes for close at 1733360525386Writing region close event to WAL at 1733360525386Closed at 1733360525386 2024-12-05T01:02:05,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742162_1338 (size=85) 2024-12-05T01:02:05,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742162_1338 (size=85) 2024-12-05T01:02:05,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742162_1338 (size=85) 2024-12-05T01:02:05,393 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:05,394 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing 84caee47b3599da908b2901f15df81e7, disabling compactions & flushes 2024-12-05T01:02:05,394 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7. 2024-12-05T01:02:05,394 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7. 2024-12-05T01:02:05,394 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7. after waiting 0 ms 2024-12-05T01:02:05,394 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7. 2024-12-05T01:02:05,394 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7. 
2024-12-05T01:02:05,394 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for 84caee47b3599da908b2901f15df81e7: Waiting for close lock at 1733360525394Disabling compacts and flushes for region at 1733360525394Disabling writes for close at 1733360525394Writing region close event to WAL at 1733360525394Closed at 1733360525394 2024-12-05T01:02:05,395 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T01:02:05,395 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733360525395"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360525395"}]},"ts":"1733360525395"} 2024-12-05T01:02:05,395 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733360525395"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360525395"}]},"ts":"1733360525395"} 2024-12-05T01:02:05,398 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T01:02:05,398 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T01:02:05,398 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360525398"}]},"ts":"1733360525398"} 2024-12-05T01:02:05,400 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-05T01:02:05,400 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {2113c16e5528=0} racks are {/default-rack=0} 2024-12-05T01:02:05,401 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T01:02:05,401 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T01:02:05,401 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T01:02:05,401 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T01:02:05,401 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T01:02:05,401 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T01:02:05,401 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T01:02:05,401 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T01:02:05,401 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T01:02:05,401 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T01:02:05,401 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1b1d465c05c6bf6dd4bc7a7c22203ce4, ASSIGN}, {pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=84caee47b3599da908b2901f15df81e7, ASSIGN}] 2024-12-05T01:02:05,402 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=84caee47b3599da908b2901f15df81e7, ASSIGN 2024-12-05T01:02:05,402 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1b1d465c05c6bf6dd4bc7a7c22203ce4, ASSIGN 2024-12-05T01:02:05,403 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1b1d465c05c6bf6dd4bc7a7c22203ce4, ASSIGN; state=OFFLINE, location=2113c16e5528,32927,1733360288712; forceNewPlan=false, retain=false 2024-12-05T01:02:05,403 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=84caee47b3599da908b2901f15df81e7, ASSIGN; state=OFFLINE, location=2113c16e5528,44435,1733360288855; forceNewPlan=false, retain=false 2024-12-05T01:02:05,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-05T01:02:05,553 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
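
The balancer output and the two TransitRegionStateProcedure ASSIGN sub-procedures above decide which region server opens each new region (1b1d465c... on port 32927, 84caee47... on port 44435 in this run). Once assignment settles, the placements can be read back from any client; a short sketch, with output format illustrative:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionPlacementSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))) {
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
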
2024-12-05T01:02:05,553 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=84caee47b3599da908b2901f15df81e7, regionState=OPENING, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:02:05,553 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=1b1d465c05c6bf6dd4bc7a7c22203ce4, regionState=OPENING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:02:05,555 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1b1d465c05c6bf6dd4bc7a7c22203ce4, ASSIGN because future has completed 2024-12-05T01:02:05,555 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1b1d465c05c6bf6dd4bc7a7c22203ce4, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:02:05,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=84caee47b3599da908b2901f15df81e7, ASSIGN because future has completed 2024-12-05T01:02:05,556 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure 84caee47b3599da908b2901f15df81e7, server=2113c16e5528,44435,1733360288855}] 2024-12-05T01:02:05,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-05T01:02:05,710 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4. 2024-12-05T01:02:05,710 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => 1b1d465c05c6bf6dd4bc7a7c22203ce4, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4.', STARTKEY => '', ENDKEY => '2'} 2024-12-05T01:02:05,710 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4. service=AccessControlService 2024-12-05T01:02:05,710 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
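
The "Registered coprocessor service: ... service=AccessControlService" and "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" lines appear because the test cluster runs with the AccessController coprocessor enabled; that is cluster configuration, not per-table code. A sketch of the relevant settings, shown programmatically although the same keys normally live in hbase-site.xml, and the exact set needed depends on which services must be secured:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.security.access.AccessController;

    public class AccessControllerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Enable authorization and load AccessController on master, region servers and regions.
        conf.set("hbase.security.authorization", "true");
        conf.set("hbase.coprocessor.master.classes", AccessController.class.getName());
        conf.set("hbase.coprocessor.regionserver.classes", AccessController.class.getName());
        conf.set("hbase.coprocessor.region.classes", AccessController.class.getName());
        System.out.println(conf.get("hbase.coprocessor.region.classes"));
      }
    }
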
2024-12-05T01:02:05,710 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 1b1d465c05c6bf6dd4bc7a7c22203ce4 2024-12-05T01:02:05,710 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7. 2024-12-05T01:02:05,710 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:05,710 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7752): Opening region: {ENCODED => 84caee47b3599da908b2901f15df81e7, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7.', STARTKEY => '2', ENDKEY => ''} 2024-12-05T01:02:05,710 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for 1b1d465c05c6bf6dd4bc7a7c22203ce4 2024-12-05T01:02:05,710 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for 1b1d465c05c6bf6dd4bc7a7c22203ce4 2024-12-05T01:02:05,711 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7. service=AccessControlService 2024-12-05T01:02:05,711 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T01:02:05,711 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 84caee47b3599da908b2901f15df81e7 2024-12-05T01:02:05,711 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:05,711 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7794): checking encryption for 84caee47b3599da908b2901f15df81e7 2024-12-05T01:02:05,711 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7797): checking classloading for 84caee47b3599da908b2901f15df81e7 2024-12-05T01:02:05,712 INFO [StoreOpener-1b1d465c05c6bf6dd4bc7a7c22203ce4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1b1d465c05c6bf6dd4bc7a7c22203ce4 2024-12-05T01:02:05,712 INFO [StoreOpener-84caee47b3599da908b2901f15df81e7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 84caee47b3599da908b2901f15df81e7 2024-12-05T01:02:05,713 INFO [StoreOpener-1b1d465c05c6bf6dd4bc7a7c22203ce4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1b1d465c05c6bf6dd4bc7a7c22203ce4 columnFamilyName cf 2024-12-05T01:02:05,713 INFO [StoreOpener-84caee47b3599da908b2901f15df81e7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 84caee47b3599da908b2901f15df81e7 columnFamilyName cf 2024-12-05T01:02:05,713 DEBUG [StoreOpener-1b1d465c05c6bf6dd4bc7a7c22203ce4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:05,713 DEBUG [StoreOpener-84caee47b3599da908b2901f15df81e7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:05,714 INFO [StoreOpener-1b1d465c05c6bf6dd4bc7a7c22203ce4-1 {}] regionserver.HStore(327): Store=1b1d465c05c6bf6dd4bc7a7c22203ce4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:02:05,714 INFO [StoreOpener-84caee47b3599da908b2901f15df81e7-1 {}] regionserver.HStore(327): Store=84caee47b3599da908b2901f15df81e7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:02:05,714 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1038): replaying wal for 84caee47b3599da908b2901f15df81e7 2024-12-05T01:02:05,714 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 1b1d465c05c6bf6dd4bc7a7c22203ce4 2024-12-05T01:02:05,714 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7 2024-12-05T01:02:05,714 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4 2024-12-05T01:02:05,715 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7 2024-12-05T01:02:05,715 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4 2024-12-05T01:02:05,715 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1048): stopping wal replay for 84caee47b3599da908b2901f15df81e7 2024-12-05T01:02:05,715 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1060): Cleaning up temporary data for 84caee47b3599da908b2901f15df81e7 2024-12-05T01:02:05,715 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for 1b1d465c05c6bf6dd4bc7a7c22203ce4 2024-12-05T01:02:05,715 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for 1b1d465c05c6bf6dd4bc7a7c22203ce4 
2024-12-05T01:02:05,716 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1093): writing seq id for 84caee47b3599da908b2901f15df81e7 2024-12-05T01:02:05,717 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for 1b1d465c05c6bf6dd4bc7a7c22203ce4 2024-12-05T01:02:05,718 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:02:05,719 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:02:05,719 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1114): Opened 84caee47b3599da908b2901f15df81e7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69747589, jitterRate=0.03932006657123566}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:02:05,719 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened 1b1d465c05c6bf6dd4bc7a7c22203ce4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62491943, jitterRate=-0.06879748404026031}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:02:05,719 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1b1d465c05c6bf6dd4bc7a7c22203ce4 2024-12-05T01:02:05,719 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 84caee47b3599da908b2901f15df81e7 2024-12-05T01:02:05,720 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1006): Region open journal for 84caee47b3599da908b2901f15df81e7: Running coprocessor pre-open hook at 1733360525711Writing region info on filesystem at 1733360525711Initializing all the Stores at 1733360525712 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360525712Cleaning up temporary data from old regions at 1733360525715 (+3 ms)Running coprocessor post-open hooks at 1733360525719 (+4 ms)Region opened successfully at 1733360525720 (+1 ms) 2024-12-05T01:02:05,720 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for 1b1d465c05c6bf6dd4bc7a7c22203ce4: 
Running coprocessor pre-open hook at 1733360525711Writing region info on filesystem at 1733360525711Initializing all the Stores at 1733360525711Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360525711Cleaning up temporary data from old regions at 1733360525715 (+4 ms)Running coprocessor post-open hooks at 1733360525719 (+4 ms)Region opened successfully at 1733360525720 (+1 ms) 2024-12-05T01:02:05,721 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7., pid=147, masterSystemTime=1733360525708 2024-12-05T01:02:05,721 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4., pid=146, masterSystemTime=1733360525707 2024-12-05T01:02:05,722 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7. 2024-12-05T01:02:05,723 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7. 2024-12-05T01:02:05,723 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=84caee47b3599da908b2901f15df81e7, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:02:05,723 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4. 2024-12-05T01:02:05,723 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4. 
2024-12-05T01:02:05,724 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=1b1d465c05c6bf6dd4bc7a7c22203ce4, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:02:05,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure 84caee47b3599da908b2901f15df81e7, server=2113c16e5528,44435,1733360288855 because future has completed 2024-12-05T01:02:05,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1b1d465c05c6bf6dd4bc7a7c22203ce4, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:02:05,727 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=145 2024-12-05T01:02:05,727 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=145, state=SUCCESS, hasLock=false; OpenRegionProcedure 84caee47b3599da908b2901f15df81e7, server=2113c16e5528,44435,1733360288855 in 169 msec 2024-12-05T01:02:05,728 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=144 2024-12-05T01:02:05,728 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 1b1d465c05c6bf6dd4bc7a7c22203ce4, server=2113c16e5528,32927,1733360288712 in 171 msec 2024-12-05T01:02:05,728 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=84caee47b3599da908b2901f15df81e7, ASSIGN in 326 msec 2024-12-05T01:02:05,729 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=144, resume processing ppid=143 2024-12-05T01:02:05,729 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1b1d465c05c6bf6dd4bc7a7c22203ce4, ASSIGN in 327 msec 2024-12-05T01:02:05,730 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T01:02:05,730 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360525730"}]},"ts":"1733360525730"} 2024-12-05T01:02:05,731 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-05T01:02:05,732 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T01:02:05,732 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-05T01:02:05,735 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-05T01:02:05,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:05,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:05,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:05,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:05,740 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:05,740 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:05,740 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:05,740 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:05,740 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:05,740 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:05,740 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:05,740 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:05,741 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 373 msec 2024-12-05T01:02:05,901 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-05T01:02:05,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-05T01:02:05,992 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-05T01:02:05,995 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:02:05,998 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7., hostname=2113c16e5528,44435,1733360288855, seqNum=2] 2024-12-05T01:02:06,000 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-12-05T01:02:06,012 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [1b1d465c05c6bf6dd4bc7a7c22203ce4, 84caee47b3599da908b2901f15df81e7] 2024-12-05T01:02:06,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[1b1d465c05c6bf6dd4bc7a7c22203ce4, 84caee47b3599da908b2901f15df81e7], force=true 2024-12-05T01:02:06,018 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[1b1d465c05c6bf6dd4bc7a7c22203ce4, 84caee47b3599da908b2901f15df81e7], force=true 2024-12-05T01:02:06,018 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[1b1d465c05c6bf6dd4bc7a7c22203ce4, 84caee47b3599da908b2901f15df81e7], force=true 2024-12-05T01:02:06,018 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, 
state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[1b1d465c05c6bf6dd4bc7a7c22203ce4, 84caee47b3599da908b2901f15df81e7], force=true 2024-12-05T01:02:06,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-05T01:02:06,024 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1b1d465c05c6bf6dd4bc7a7c22203ce4, UNASSIGN}, {pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=84caee47b3599da908b2901f15df81e7, UNASSIGN}] 2024-12-05T01:02:06,025 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1b1d465c05c6bf6dd4bc7a7c22203ce4, UNASSIGN 2024-12-05T01:02:06,025 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=84caee47b3599da908b2901f15df81e7, UNASSIGN 2024-12-05T01:02:06,025 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=84caee47b3599da908b2901f15df81e7, regionState=CLOSING, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:02:06,025 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=1b1d465c05c6bf6dd4bc7a7c22203ce4, regionState=CLOSING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:02:06,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=84caee47b3599da908b2901f15df81e7, UNASSIGN because future has completed 2024-12-05T01:02:06,027 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-05T01:02:06,027 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure 84caee47b3599da908b2901f15df81e7, server=2113c16e5528,44435,1733360288855}] 2024-12-05T01:02:06,028 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1b1d465c05c6bf6dd4bc7a7c22203ce4, UNASSIGN because future has completed 2024-12-05T01:02:06,028 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-05T01:02:06,028 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 
1b1d465c05c6bf6dd4bc7a7c22203ce4, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:02:06,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-05T01:02:06,179 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(122): Close 84caee47b3599da908b2901f15df81e7 2024-12-05T01:02:06,179 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-05T01:02:06,180 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1722): Closing 84caee47b3599da908b2901f15df81e7, disabling compactions & flushes 2024-12-05T01:02:06,180 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7. 2024-12-05T01:02:06,180 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7. 2024-12-05T01:02:06,180 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7. after waiting 0 ms 2024-12-05T01:02:06,180 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7. 2024-12-05T01:02:06,180 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2902): Flushing 84caee47b3599da908b2901f15df81e7 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-05T01:02:06,180 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(122): Close 1b1d465c05c6bf6dd4bc7a7c22203ce4 2024-12-05T01:02:06,180 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-05T01:02:06,180 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1722): Closing 1b1d465c05c6bf6dd4bc7a7c22203ce4, disabling compactions & flushes 2024-12-05T01:02:06,180 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4. 2024-12-05T01:02:06,180 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4. 
2024-12-05T01:02:06,181 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4. after waiting 0 ms 2024-12-05T01:02:06,181 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4. 2024-12-05T01:02:06,181 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(2902): Flushing 1b1d465c05c6bf6dd4bc7a7c22203ce4 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-05T01:02:06,196 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7/.tmp/cf/b6dd84c7dbdb4018b8e237a1e6412483 is 28, key is 2/cf:/1733360525999/Put/seqid=0 2024-12-05T01:02:06,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742163_1339 (size=4945) 2024-12-05T01:02:06,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742163_1339 (size=4945) 2024-12-05T01:02:06,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742163_1339 (size=4945) 2024-12-05T01:02:06,202 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7/.tmp/cf/b6dd84c7dbdb4018b8e237a1e6412483 2024-12-05T01:02:06,202 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4/.tmp/cf/9699be0150e24e41bedee24edbe3687b is 28, key is 1/cf:/1733360525996/Put/seqid=0 2024-12-05T01:02:06,207 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7/.tmp/cf/b6dd84c7dbdb4018b8e237a1e6412483 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7/cf/b6dd84c7dbdb4018b8e237a1e6412483 2024-12-05T01:02:06,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742164_1340 (size=4945) 2024-12-05T01:02:06,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:41667 is added to blk_1073742164_1340 (size=4945) 2024-12-05T01:02:06,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742164_1340 (size=4945) 2024-12-05T01:02:06,211 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4/.tmp/cf/9699be0150e24e41bedee24edbe3687b 2024-12-05T01:02:06,211 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7/cf/b6dd84c7dbdb4018b8e237a1e6412483, entries=1, sequenceid=5, filesize=4.8 K 2024-12-05T01:02:06,212 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 84caee47b3599da908b2901f15df81e7 in 32ms, sequenceid=5, compaction requested=false 2024-12-05T01:02:06,216 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4/.tmp/cf/9699be0150e24e41bedee24edbe3687b as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4/cf/9699be0150e24e41bedee24edbe3687b 2024-12-05T01:02:06,216 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T01:02:06,216 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:02:06,216 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7. 
2024-12-05T01:02:06,216 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1676): Region close journal for 84caee47b3599da908b2901f15df81e7: Waiting for close lock at 1733360526179Running coprocessor pre-close hooks at 1733360526179Disabling compacts and flushes for region at 1733360526179Disabling writes for close at 1733360526180 (+1 ms)Obtaining lock to block concurrent updates at 1733360526180Preparing flush snapshotting stores in 84caee47b3599da908b2901f15df81e7 at 1733360526180Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733360526180Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7. at 1733360526180Flushing 84caee47b3599da908b2901f15df81e7/cf: creating writer at 1733360526181 (+1 ms)Flushing 84caee47b3599da908b2901f15df81e7/cf: appending metadata at 1733360526195 (+14 ms)Flushing 84caee47b3599da908b2901f15df81e7/cf: closing flushed file at 1733360526196 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e9d4270: reopening flushed file at 1733360526206 (+10 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 84caee47b3599da908b2901f15df81e7 in 32ms, sequenceid=5, compaction requested=false at 1733360526212 (+6 ms)Writing region close event to WAL at 1733360526213 (+1 ms)Running coprocessor post-close hooks at 1733360526216 (+3 ms)Closed at 1733360526216 2024-12-05T01:02:06,218 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(157): Closed 84caee47b3599da908b2901f15df81e7 2024-12-05T01:02:06,219 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=84caee47b3599da908b2901f15df81e7, regionState=CLOSED 2024-12-05T01:02:06,220 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=151, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure 84caee47b3599da908b2901f15df81e7, server=2113c16e5528,44435,1733360288855 because future has completed 2024-12-05T01:02:06,220 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4/cf/9699be0150e24e41bedee24edbe3687b, entries=1, sequenceid=5, filesize=4.8 K 2024-12-05T01:02:06,221 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 1b1d465c05c6bf6dd4bc7a7c22203ce4 in 40ms, sequenceid=5, compaction requested=false 2024-12-05T01:02:06,223 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=150 2024-12-05T01:02:06,223 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=150, state=SUCCESS, hasLock=false; CloseRegionProcedure 84caee47b3599da908b2901f15df81e7, server=2113c16e5528,44435,1733360288855 in 194 msec 2024-12-05T01:02:06,224 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished 
pid=150, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=84caee47b3599da908b2901f15df81e7, UNASSIGN in 199 msec 2024-12-05T01:02:06,225 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T01:02:06,226 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:02:06,226 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4. 2024-12-05T01:02:06,226 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1676): Region close journal for 1b1d465c05c6bf6dd4bc7a7c22203ce4: Waiting for close lock at 1733360526180Running coprocessor pre-close hooks at 1733360526180Disabling compacts and flushes for region at 1733360526180Disabling writes for close at 1733360526181 (+1 ms)Obtaining lock to block concurrent updates at 1733360526181Preparing flush snapshotting stores in 1b1d465c05c6bf6dd4bc7a7c22203ce4 at 1733360526181Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733360526181Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4. 
at 1733360526181Flushing 1b1d465c05c6bf6dd4bc7a7c22203ce4/cf: creating writer at 1733360526181Flushing 1b1d465c05c6bf6dd4bc7a7c22203ce4/cf: appending metadata at 1733360526202 (+21 ms)Flushing 1b1d465c05c6bf6dd4bc7a7c22203ce4/cf: closing flushed file at 1733360526202Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6655a72: reopening flushed file at 1733360526215 (+13 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 1b1d465c05c6bf6dd4bc7a7c22203ce4 in 40ms, sequenceid=5, compaction requested=false at 1733360526221 (+6 ms)Writing region close event to WAL at 1733360526222 (+1 ms)Running coprocessor post-close hooks at 1733360526225 (+3 ms)Closed at 1733360526226 (+1 ms) 2024-12-05T01:02:06,227 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(157): Closed 1b1d465c05c6bf6dd4bc7a7c22203ce4 2024-12-05T01:02:06,227 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=1b1d465c05c6bf6dd4bc7a7c22203ce4, regionState=CLOSED 2024-12-05T01:02:06,229 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1b1d465c05c6bf6dd4bc7a7c22203ce4, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:02:06,231 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=149 2024-12-05T01:02:06,231 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=149, state=SUCCESS, hasLock=false; CloseRegionProcedure 1b1d465c05c6bf6dd4bc7a7c22203ce4, server=2113c16e5528,32927,1733360288712 in 201 msec 2024-12-05T01:02:06,232 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=149, resume processing ppid=148 2024-12-05T01:02:06,232 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=1b1d465c05c6bf6dd4bc7a7c22203ce4, UNASSIGN in 207 msec 2024-12-05T01:02:06,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742165_1341 (size=84) 2024-12-05T01:02:06,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742165_1341 (size=84) 2024-12-05T01:02:06,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742165_1341 (size=84) 2024-12-05T01:02:06,245 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:06,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742166_1342 (size=20) 2024-12-05T01:02:06,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742166_1342 (size=20) 2024-12-05T01:02:06,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742166_1342 (size=20) 2024-12-05T01:02:06,253 DEBUG [PEWorker-3 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:06,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742167_1343 (size=21) 2024-12-05T01:02:06,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742167_1343 (size=21) 2024-12-05T01:02:06,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742167_1343 (size=21) 2024-12-05T01:02:06,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742168_1344 (size=84) 2024-12-05T01:02:06,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742168_1344 (size=84) 2024-12-05T01:02:06,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742168_1344 (size=84) 2024-12-05T01:02:06,265 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:06,272 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-05T01:02:06,274 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525365.1b1d465c05c6bf6dd4bc7a7c22203ce4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-05T01:02:06,274 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733360525365.84caee47b3599da908b2901f15df81e7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-05T01:02:06,274 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-05T01:02:06,278 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0e7ee94856ad7fcaed02bac5c3cae8b9, ASSIGN}] 2024-12-05T01:02:06,278 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0e7ee94856ad7fcaed02bac5c3cae8b9, ASSIGN 2024-12-05T01:02:06,279 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0e7ee94856ad7fcaed02bac5c3cae8b9, ASSIGN; state=MERGED, location=2113c16e5528,32927,1733360288712; forceNewPlan=false, retain=false 2024-12-05T01:02:06,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-05T01:02:06,429 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-05T01:02:06,430 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=0e7ee94856ad7fcaed02bac5c3cae8b9, regionState=OPENING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:02:06,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0e7ee94856ad7fcaed02bac5c3cae8b9, ASSIGN because future has completed 2024-12-05T01:02:06,432 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0e7ee94856ad7fcaed02bac5c3cae8b9, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:02:06,465 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_3/usercache/jenkins/appcache/application_1733360294895_0006/container_1733360294895_0006_01_000003/launch_container.sh] 2024-12-05T01:02:06,465 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_3/usercache/jenkins/appcache/application_1733360294895_0006/container_1733360294895_0006_01_000003/container_tokens] 2024-12-05T01:02:06,465 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_3/usercache/jenkins/appcache/application_1733360294895_0006/container_1733360294895_0006_01_000003/sysfs] 2024-12-05T01:02:06,586 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9. 
2024-12-05T01:02:06,587 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7752): Opening region: {ENCODED => 0e7ee94856ad7fcaed02bac5c3cae8b9, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9.', STARTKEY => '', ENDKEY => ''} 2024-12-05T01:02:06,587 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9. service=AccessControlService 2024-12-05T01:02:06,587 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T01:02:06,587 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:06,587 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:06,588 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7794): checking encryption for 0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:06,588 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7797): checking classloading for 0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:06,589 INFO [StoreOpener-0e7ee94856ad7fcaed02bac5c3cae8b9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:06,590 INFO [StoreOpener-0e7ee94856ad7fcaed02bac5c3cae8b9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0e7ee94856ad7fcaed02bac5c3cae8b9 columnFamilyName cf 2024-12-05T01:02:06,590 DEBUG [StoreOpener-0e7ee94856ad7fcaed02bac5c3cae8b9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:06,597 DEBUG [StoreOpener-0e7ee94856ad7fcaed02bac5c3cae8b9-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9/cf/9699be0150e24e41bedee24edbe3687b.1b1d465c05c6bf6dd4bc7a7c22203ce4->hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4/cf/9699be0150e24e41bedee24edbe3687b-top 2024-12-05T01:02:06,601 DEBUG [StoreOpener-0e7ee94856ad7fcaed02bac5c3cae8b9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9/cf/b6dd84c7dbdb4018b8e237a1e6412483.84caee47b3599da908b2901f15df81e7->hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7/cf/b6dd84c7dbdb4018b8e237a1e6412483-top 2024-12-05T01:02:06,602 INFO [StoreOpener-0e7ee94856ad7fcaed02bac5c3cae8b9-1 {}] regionserver.HStore(327): Store=0e7ee94856ad7fcaed02bac5c3cae8b9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:02:06,602 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1038): replaying wal for 0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:06,603 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:06,604 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:06,604 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1048): stopping wal replay for 0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:06,604 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1060): Cleaning up temporary data for 0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:06,606 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1093): writing seq id for 0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:06,607 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1114): Opened 0e7ee94856ad7fcaed02bac5c3cae8b9; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73705759, jitterRate=0.09830139577388763}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:02:06,607 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:06,607 DEBUG 
[RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1006): Region open journal for 0e7ee94856ad7fcaed02bac5c3cae8b9: Running coprocessor pre-open hook at 1733360526588Writing region info on filesystem at 1733360526588Initializing all the Stores at 1733360526589 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360526589Cleaning up temporary data from old regions at 1733360526604 (+15 ms)Running coprocessor post-open hooks at 1733360526607 (+3 ms)Region opened successfully at 1733360526607 2024-12-05T01:02:06,608 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9., pid=154, masterSystemTime=1733360526583 2024-12-05T01:02:06,608 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9.,because compaction is disabled. 2024-12-05T01:02:06,610 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9. 2024-12-05T01:02:06,610 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9. 
2024-12-05T01:02:06,611 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=0e7ee94856ad7fcaed02bac5c3cae8b9, regionState=OPEN, openSeqNum=9, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:02:06,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0e7ee94856ad7fcaed02bac5c3cae8b9, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:02:06,615 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-12-05T01:02:06,615 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; OpenRegionProcedure 0e7ee94856ad7fcaed02bac5c3cae8b9, server=2113c16e5528,32927,1733360288712 in 181 msec 2024-12-05T01:02:06,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=148 2024-12-05T01:02:06,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0e7ee94856ad7fcaed02bac5c3cae8b9, ASSIGN in 337 msec 2024-12-05T01:02:06,619 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[1b1d465c05c6bf6dd4bc7a7c22203ce4, 84caee47b3599da908b2901f15df81e7], force=true in 604 msec 2024-12-05T01:02:06,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-05T01:02:06,643 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-05T01:02:06,644 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-05T01:02:06,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360526644 (current time:1733360526644). 
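
Editor's note: immediately after MERGE_REGIONS completes, the client requests a snapshot described as { ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }. A minimal sketch of issuing that kind of snapshot through the Admin API, assuming a standard client connection (the names are taken from the log, everything else is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // type=FLUSH matches the snapshot description logged by MasterRpcServices;
      // the TTL is left unset, which the master resets to the default of 0 (no expiry).
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
          SnapshotType.FLUSH);
    }
  }
}
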
2024-12-05T01:02:06,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:02:06,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-05T01:02:06,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:02:06,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a5ceb78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:06,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:06,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:06,647 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:06,647 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:06,648 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:06,648 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c7f3057, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:06,648 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:06,648 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:06,648 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:06,649 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52344, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:06,649 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@598436a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:06,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:06,650 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:06,651 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:06,651 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45840, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:02:06,653 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 2024-12-05T01:02:06,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:06,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:06,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:06,653 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T01:02:06,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47f452e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:06,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:06,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:06,655 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:06,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:06,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:06,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3010db8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:06,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:06,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:06,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:06,656 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52362, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:06,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15aa2335, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:06,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:06,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:06,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:06,659 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45848, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-05T01:02:06,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:02:06,662 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 2024-12-05T01:02:06,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:06,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:06,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:06,662 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
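
Editor's note: while validating the snapshot, the master reads the table ACL and finds "jenkins: RWXCA", which it then writes into the snapshot description. A hedged sketch of how a grant like that is typically created via AccessControlClient; whether the test sets it up exactly this way is not shown in this log, and the connection handling here is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantAclSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // A table-level grant of READ/WRITE/EXEC/CREATE/ADMIN is what later
      // shows up in hbase:acl as "jenkins: RWXCA" (family/qualifier left null
      // to scope the grant to the whole table).
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
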
2024-12-05T01:02:06,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-05T01:02:06,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T01:02:06,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-05T01:02:06,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-05T01:02:06,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-05T01:02:06,665 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:02:06,666 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:02:06,668 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:02:06,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742169_1345 (size=216) 2024-12-05T01:02:06,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742169_1345 (size=216) 2024-12-05T01:02:06,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742169_1345 (size=216) 2024-12-05T01:02:06,679 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:02:06,679 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
0e7ee94856ad7fcaed02bac5c3cae8b9}] 2024-12-05T01:02:06,680 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:06,697 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T01:02:06,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-05T01:02:06,832 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32927 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=156 2024-12-05T01:02:06,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9. 2024-12-05T01:02:06,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.HRegion(2603): Flush status journal for 0e7ee94856ad7fcaed02bac5c3cae8b9: 2024-12-05T01:02:06,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-05T01:02:06,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:06,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:06,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9/cf/9699be0150e24e41bedee24edbe3687b.1b1d465c05c6bf6dd4bc7a7c22203ce4->hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4/cf/9699be0150e24e41bedee24edbe3687b-top, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9/cf/b6dd84c7dbdb4018b8e237a1e6412483.84caee47b3599da908b2901f15df81e7->hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7/cf/b6dd84c7dbdb4018b8e237a1e6412483-top] hfiles 2024-12-05T01:02:06,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9/cf/9699be0150e24e41bedee24edbe3687b.1b1d465c05c6bf6dd4bc7a7c22203ce4 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:06,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9/cf/b6dd84c7dbdb4018b8e237a1e6412483.84caee47b3599da908b2901f15df81e7 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:06,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742170_1346 (size=269) 2024-12-05T01:02:06,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742170_1346 (size=269) 2024-12-05T01:02:06,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742170_1346 (size=269) 2024-12-05T01:02:06,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9. 
2024-12-05T01:02:06,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-05T01:02:06,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=156 2024-12-05T01:02:06,846 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:06,846 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:06,849 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=155 2024-12-05T01:02:06,849 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=155, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0e7ee94856ad7fcaed02bac5c3cae8b9 in 168 msec 2024-12-05T01:02:06,849 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:02:06,849 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:02:06,850 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:02:06,850 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:06,851 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:06,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742171_1347 (size=670) 2024-12-05T01:02:06,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742171_1347 (size=670) 2024-12-05T01:02:06,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742171_1347 (size=670) 2024-12-05T01:02:06,866 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:02:06,871 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:02:06,872 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:06,873 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:02:06,873 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-05T01:02:06,875 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 210 msec 2024-12-05T01:02:06,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-05T01:02:06,982 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-05T01:02:06,983 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360526982 2024-12-05T01:02:06,983 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37149, tgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360526982, rawTgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360526982, srcFsUri=hdfs://localhost:37149, srcDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:07,020 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37149, inputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:07,020 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360526982, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360526982/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:07,022 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T01:02:07,029 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360526982/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:07,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742172_1348 (size=216) 2024-12-05T01:02:07,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742172_1348 (size=216) 2024-12-05T01:02:07,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742172_1348 (size=216) 2024-12-05T01:02:07,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742173_1349 (size=670) 2024-12-05T01:02:07,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742173_1349 (size=670) 2024-12-05T01:02:07,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742173_1349 (size=670) 2024-12-05T01:02:07,048 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:07,048 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:07,048 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:08,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:08,304 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-05T01:02:08,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:08,305 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-05T01:02:08,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-05T01:02:08,408 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-8562163271179138476.jar 2024-12-05T01:02:08,409 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:08,409 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:08,448 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0006_000001 (auth:SIMPLE) from 127.0.0.1:35990 2024-12-05T01:02:08,501 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_2/usercache/jenkins/appcache/application_1733360294895_0006/container_1733360294895_0006_01_000001/launch_container.sh] 2024-12-05T01:02:08,501 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_2/usercache/jenkins/appcache/application_1733360294895_0006/container_1733360294895_0006_01_000001/container_tokens] 2024-12-05T01:02:08,501 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_2/usercache/jenkins/appcache/application_1733360294895_0006/container_1733360294895_0006_01_000001/sysfs] 2024-12-05T01:02:08,508 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-5014647641860077495.jar 2024-12-05T01:02:08,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 
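
Editor's note: the ExportSnapshot entries above (resolving inputRoot/outputRoot, copying the snapshot manifest into export-1733360526982) and the TableMapReduceUtil lines shipping dependency jars correspond to the export MapReduce job being prepared. A minimal sketch of launching such an export programmatically with ToolRunner and ExportSnapshot's documented options; the destination path and mapper count are illustrative, not taken verbatim from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Same single-dash option names the ExportSnapshot command line documents.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "-copy-to", "hdfs://localhost:37149/user/jenkins/export-test",
        "-mappers", "2"
    });
    System.exit(rc);
  }
}
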
2024-12-05T01:02:08,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:08,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:08,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:08,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:08,511 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:08,511 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T01:02:08,511 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T01:02:08,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T01:02:08,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T01:02:08,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T01:02:08,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T01:02:08,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T01:02:08,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T01:02:08,514 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T01:02:08,514 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T01:02:08,514 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T01:02:08,515 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:02:08,515 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:02:08,515 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:02:08,516 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:02:08,516 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:02:08,516 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:02:08,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:02:08,734 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742174_1350 (size=24020) 2024-12-05T01:02:08,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742174_1350 (size=24020) 2024-12-05T01:02:08,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742174_1350 (size=24020) 2024-12-05T01:02:08,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742175_1351 (size=77755) 2024-12-05T01:02:08,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742175_1351 (size=77755) 2024-12-05T01:02:08,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742175_1351 (size=77755) 2024-12-05T01:02:08,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742176_1352 (size=131360) 2024-12-05T01:02:08,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742176_1352 (size=131360) 2024-12-05T01:02:08,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742176_1352 (size=131360) 2024-12-05T01:02:09,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742177_1353 (size=111793) 2024-12-05T01:02:09,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742177_1353 (size=111793) 2024-12-05T01:02:09,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742177_1353 (size=111793) 2024-12-05T01:02:09,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742178_1354 (size=1832290) 2024-12-05T01:02:09,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742178_1354 (size=1832290) 2024-12-05T01:02:09,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742178_1354 (size=1832290) 2024-12-05T01:02:09,256 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:02:09,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742179_1355 (size=8360282) 2024-12-05T01:02:09,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742179_1355 (size=8360282) 2024-12-05T01:02:09,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742179_1355 (size=8360282) 2024-12-05T01:02:09,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742180_1356 (size=503880) 2024-12-05T01:02:09,443 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742180_1356 (size=503880) 2024-12-05T01:02:09,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742180_1356 (size=503880) 2024-12-05T01:02:09,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742181_1357 (size=322274) 2024-12-05T01:02:09,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742181_1357 (size=322274) 2024-12-05T01:02:09,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742181_1357 (size=322274) 2024-12-05T01:02:09,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742182_1358 (size=20406) 2024-12-05T01:02:09,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742182_1358 (size=20406) 2024-12-05T01:02:09,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742182_1358 (size=20406) 2024-12-05T01:02:09,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742183_1359 (size=45609) 2024-12-05T01:02:09,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742183_1359 (size=45609) 2024-12-05T01:02:09,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742183_1359 (size=45609) 2024-12-05T01:02:10,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742184_1360 (size=136454) 2024-12-05T01:02:10,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742184_1360 (size=136454) 2024-12-05T01:02:10,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742184_1360 (size=136454) 2024-12-05T01:02:10,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742185_1361 (size=1597136) 2024-12-05T01:02:10,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742185_1361 (size=1597136) 2024-12-05T01:02:10,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742185_1361 (size=1597136) 2024-12-05T01:02:10,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742186_1362 (size=30873) 2024-12-05T01:02:10,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742186_1362 (size=30873) 2024-12-05T01:02:10,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742186_1362 (size=30873) 2024-12-05T01:02:10,385 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742187_1363 (size=29229) 2024-12-05T01:02:10,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742187_1363 (size=29229) 2024-12-05T01:02:10,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742187_1363 (size=29229) 2024-12-05T01:02:10,461 WARN [regionserver/2113c16e5528:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 1, running: 0 2024-12-05T01:02:10,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742188_1364 (size=903858) 2024-12-05T01:02:10,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742188_1364 (size=903858) 2024-12-05T01:02:10,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742188_1364 (size=903858) 2024-12-05T01:02:10,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742189_1365 (size=5175431) 2024-12-05T01:02:10,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742189_1365 (size=5175431) 2024-12-05T01:02:10,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742189_1365 (size=5175431) 2024-12-05T01:02:10,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742190_1366 (size=232881) 2024-12-05T01:02:10,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742190_1366 (size=232881) 2024-12-05T01:02:10,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742190_1366 (size=232881) 2024-12-05T01:02:10,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742191_1367 (size=1323991) 2024-12-05T01:02:10,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742191_1367 (size=1323991) 2024-12-05T01:02:10,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742191_1367 (size=1323991) 2024-12-05T01:02:11,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742192_1368 (size=4695811) 2024-12-05T01:02:11,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742192_1368 (size=4695811) 2024-12-05T01:02:11,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742192_1368 (size=4695811) 2024-12-05T01:02:11,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742193_1369 (size=1877034) 2024-12-05T01:02:11,091 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742193_1369 (size=1877034) 2024-12-05T01:02:11,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742193_1369 (size=1877034) 2024-12-05T01:02:11,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742194_1370 (size=217555) 2024-12-05T01:02:11,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742194_1370 (size=217555) 2024-12-05T01:02:11,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742194_1370 (size=217555) 2024-12-05T01:02:11,339 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 32b2bae45319cdf8a32d24c50dad228f changed from -1.0 to 0.0, refreshing cache 2024-12-05T01:02:11,340 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 0b389d15c67c422ab446672d19f8a327 changed from -1.0 to 0.0, refreshing cache 2024-12-05T01:02:11,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742195_1371 (size=443171) 2024-12-05T01:02:11,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742195_1371 (size=443171) 2024-12-05T01:02:11,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742195_1371 (size=443171) 2024-12-05T01:02:11,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742196_1372 (size=4188619) 2024-12-05T01:02:11,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742196_1372 (size=4188619) 2024-12-05T01:02:11,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742196_1372 (size=4188619) 2024-12-05T01:02:11,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742197_1373 (size=127628) 2024-12-05T01:02:11,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742197_1373 (size=127628) 2024-12-05T01:02:11,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742197_1373 (size=127628) 2024-12-05T01:02:11,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742198_1374 (size=6424744) 2024-12-05T01:02:11,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742198_1374 (size=6424744) 2024-12-05T01:02:11,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742198_1374 (size=6424744) 2024-12-05T01:02:11,627 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. 
User classes may not be found. See Job or Job#setJar(String). 2024-12-05T01:02:11,631 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-05T01:02:11,641 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-12-05T01:02:11,641 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-12-05T01:02:11,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742199_1375 (size=481) 2024-12-05T01:02:11,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742199_1375 (size=481) 2024-12-05T01:02:11,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742199_1375 (size=481) 2024-12-05T01:02:11,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742200_1376 (size=21) 2024-12-05T01:02:11,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742200_1376 (size=21) 2024-12-05T01:02:11,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742200_1376 (size=21) 2024-12-05T01:02:11,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742201_1377 (size=304138) 2024-12-05T01:02:11,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742201_1377 (size=304138) 2024-12-05T01:02:11,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742201_1377 (size=304138) 2024-12-05T01:02:11,979 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T01:02:11,979 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T01:02:12,252 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0007_000001 (auth:SIMPLE) from 127.0.0.1:41038 2024-12-05T01:02:17,931 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0007_000001 (auth:SIMPLE) from 127.0.0.1:51004 2024-12-05T01:02:18,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742202_1378 (size=349836) 2024-12-05T01:02:18,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742202_1378 (size=349836) 2024-12-05T01:02:18,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742202_1378 (size=349836) 2024-12-05T01:02:20,173 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0007_000001 (auth:SIMPLE) from 127.0.0.1:41044 2024-12-05T01:02:20,173 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0007_000001 (auth:SIMPLE) from 127.0.0.1:60286 2024-12-05T01:02:23,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742203_1379 (size=4945) 2024-12-05T01:02:23,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742203_1379 (size=4945) 2024-12-05T01:02:23,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742203_1379 (size=4945) 2024-12-05T01:02:24,143 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_2/usercache/jenkins/appcache/application_1733360294895_0007/container_1733360294895_0007_01_000002/launch_container.sh] 2024-12-05T01:02:24,143 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_2/usercache/jenkins/appcache/application_1733360294895_0007/container_1733360294895_0007_01_000002/container_tokens] 2024-12-05T01:02:24,143 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_2/usercache/jenkins/appcache/application_1733360294895_0007/container_1733360294895_0007_01_000002/sysfs] 2024-12-05T01:02:24,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742205_1381 (size=4945) 2024-12-05T01:02:24,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742205_1381 (size=4945) 2024-12-05T01:02:24,733 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742205_1381 (size=4945) 2024-12-05T01:02:24,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742204_1380 (size=22246) 2024-12-05T01:02:24,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742204_1380 (size=22246) 2024-12-05T01:02:24,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742204_1380 (size=22246) 2024-12-05T01:02:24,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742206_1382 (size=482) 2024-12-05T01:02:24,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742206_1382 (size=482) 2024-12-05T01:02:24,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742206_1382 (size=482) 2024-12-05T01:02:24,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742207_1383 (size=22246) 2024-12-05T01:02:24,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742207_1383 (size=22246) 2024-12-05T01:02:24,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742207_1383 (size=22246) 2024-12-05T01:02:24,916 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_2/usercache/jenkins/appcache/application_1733360294895_0007/container_1733360294895_0007_01_000003/launch_container.sh] 2024-12-05T01:02:24,917 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_2/usercache/jenkins/appcache/application_1733360294895_0007/container_1733360294895_0007_01_000003/container_tokens] 2024-12-05T01:02:24,917 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_2/usercache/jenkins/appcache/application_1733360294895_0007/container_1733360294895_0007_01_000003/sysfs] 2024-12-05T01:02:24,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742208_1384 (size=349836) 2024-12-05T01:02:24,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742208_1384 (size=349836) 2024-12-05T01:02:24,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742208_1384 (size=349836) 2024-12-05T01:02:24,955 INFO [Socket Reader #1 for 
port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0007_000001 (auth:SIMPLE) from 127.0.0.1:50544 2024-12-05T01:02:24,963 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0007_000001 (auth:SIMPLE) from 127.0.0.1:36438 2024-12-05T01:02:26,326 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T01:02:26,328 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-05T01:02:26,334 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,335 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T01:02:26,335 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T01:02:26,335 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,336 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-05T01:02:26,336 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-05T01:02:26,336 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360526982/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360526982/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,336 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360526982/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-05T01:02:26,336 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360526982/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-05T01:02:26,343 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-05T01:02:26,347 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360546347"}]},"ts":"1733360546347"} 2024-12-05T01:02:26,349 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-05T01:02:26,350 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-05T01:02:26,350 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-05T01:02:26,352 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0e7ee94856ad7fcaed02bac5c3cae8b9, UNASSIGN}] 2024-12-05T01:02:26,353 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0e7ee94856ad7fcaed02bac5c3cae8b9, UNASSIGN 2024-12-05T01:02:26,353 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=0e7ee94856ad7fcaed02bac5c3cae8b9, regionState=CLOSING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:02:26,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0e7ee94856ad7fcaed02bac5c3cae8b9, UNASSIGN because future has completed 2024-12-05T01:02:26,355 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:02:26,355 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0e7ee94856ad7fcaed02bac5c3cae8b9, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:02:26,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-05T01:02:26,508 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(122): Close 0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:26,508 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:02:26,508 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1722): Closing 0e7ee94856ad7fcaed02bac5c3cae8b9, disabling compactions & flushes 
2024-12-05T01:02:26,508 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9. 2024-12-05T01:02:26,508 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9. 2024-12-05T01:02:26,508 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9. after waiting 0 ms 2024-12-05T01:02:26,508 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9. 2024-12-05T01:02:26,516 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-05T01:02:26,516 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:02:26,517 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9. 
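The export recorded a few entries above ("Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list", "Finalize the Snapshot Export", "Export Completed") is produced by the org.apache.hadoop.hbase.snapshot.ExportSnapshot tool. Below is a minimal sketch of driving that tool programmatically, assuming the tool's public no-arg constructor and Tool interface as in current HBase releases; the destination URI and mapper count are illustrative assumptions, not values copied from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExportSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Snapshot name matches the one in the log above; the -copy-to URI and
    // -mappers value are placeholders chosen for illustration only.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "-copy-to", "hdfs://localhost:37149/user/jenkins/export-test",
        "-mappers", "2"
    });
    System.exit(rc);
  }
}
```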
2024-12-05T01:02:26,517 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1676): Region close journal for 0e7ee94856ad7fcaed02bac5c3cae8b9: Waiting for close lock at 1733360546508Running coprocessor pre-close hooks at 1733360546508Disabling compacts and flushes for region at 1733360546508Disabling writes for close at 1733360546508Writing region close event to WAL at 1733360546509 (+1 ms)Running coprocessor post-close hooks at 1733360546516 (+7 ms)Closed at 1733360546517 (+1 ms) 2024-12-05T01:02:26,518 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(157): Closed 0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:26,519 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=0e7ee94856ad7fcaed02bac5c3cae8b9, regionState=CLOSED 2024-12-05T01:02:26,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0e7ee94856ad7fcaed02bac5c3cae8b9, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:02:26,523 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-12-05T01:02:26,523 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; CloseRegionProcedure 0e7ee94856ad7fcaed02bac5c3cae8b9, server=2113c16e5528,32927,1733360288712 in 167 msec 2024-12-05T01:02:26,525 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=158 2024-12-05T01:02:26,525 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=158, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0e7ee94856ad7fcaed02bac5c3cae8b9, UNASSIGN in 171 msec 2024-12-05T01:02:26,527 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=158, resume processing ppid=157 2024-12-05T01:02:26,527 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 175 msec 2024-12-05T01:02:26,528 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360546528"}]},"ts":"1733360546528"} 2024-12-05T01:02:26,529 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-12-05T01:02:26,530 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-05T01:02:26,531 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 186 msec 2024-12-05T01:02:26,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-05T01:02:26,662 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: 
default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-05T01:02:26,663 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,664 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,665 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,667 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,668 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:26,668 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4 2024-12-05T01:02:26,668 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7 2024-12-05T01:02:26,670 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4/recovered.edits] 2024-12-05T01:02:26,670 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7/recovered.edits] 2024-12-05T01:02:26,670 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9/recovered.edits] 2024-12-05T01:02:26,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,672 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-05T01:02:26,672 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-05T01:02:26,672 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-05T01:02:26,672 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-05T01:02:26,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:26,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:26,674 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:26,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:26,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-12-05T01:02:26,675 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7/cf/b6dd84c7dbdb4018b8e237a1e6412483 to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7/cf/b6dd84c7dbdb4018b8e237a1e6412483 2024-12-05T01:02:26,675 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4/cf/9699be0150e24e41bedee24edbe3687b to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4/cf/9699be0150e24e41bedee24edbe3687b 2024-12-05T01:02:26,676 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:26,676 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:26,677 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9/cf/9699be0150e24e41bedee24edbe3687b.1b1d465c05c6bf6dd4bc7a7c22203ce4 to 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9/cf/9699be0150e24e41bedee24edbe3687b.1b1d465c05c6bf6dd4bc7a7c22203ce4 2024-12-05T01:02:26,677 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:26,677 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:26,679 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9/cf/b6dd84c7dbdb4018b8e237a1e6412483.84caee47b3599da908b2901f15df81e7 to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9/cf/b6dd84c7dbdb4018b8e237a1e6412483.84caee47b3599da908b2901f15df81e7 2024-12-05T01:02:26,680 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7/recovered.edits/8.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7/recovered.edits/8.seqid 2024-12-05T01:02:26,681 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/84caee47b3599da908b2901f15df81e7 2024-12-05T01:02:26,681 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4/recovered.edits/8.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4/recovered.edits/8.seqid 2024-12-05T01:02:26,682 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/1b1d465c05c6bf6dd4bc7a7c22203ce4 2024-12-05T01:02:26,682 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9/recovered.edits/12.seqid to 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9/recovered.edits/12.seqid 2024-12-05T01:02:26,683 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0e7ee94856ad7fcaed02bac5c3cae8b9 2024-12-05T01:02:26,683 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-05T01:02:26,685 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,687 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-05T01:02:26,690 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-05T01:02:26,691 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,691 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-12-05T01:02:26,691 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360546691"}]},"ts":"9223372036854775807"} 2024-12-05T01:02:26,693 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-05T01:02:26,693 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 0e7ee94856ad7fcaed02bac5c3cae8b9, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9.', STARTKEY => '', ENDKEY => ''}] 2024-12-05T01:02:26,693 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 
2024-12-05T01:02:26,693 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733360546693"}]},"ts":"9223372036854775807"} 2024-12-05T01:02:26,695 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-05T01:02:26,696 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,697 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 33 msec 2024-12-05T01:02:26,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-12-05T01:02:26,782 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:26,782 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-05T01:02:26,783 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:26,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:26,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-05T01:02:26,786 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360546785"}]},"ts":"1733360546785"} 2024-12-05T01:02:26,787 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-05T01:02:26,787 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-05T01:02:26,788 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-05T01:02:26,789 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0b389d15c67c422ab446672d19f8a327, UNASSIGN}, {pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=32b2bae45319cdf8a32d24c50dad228f, UNASSIGN}] 2024-12-05T01:02:26,790 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=32b2bae45319cdf8a32d24c50dad228f, UNASSIGN 2024-12-05T01:02:26,790 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0b389d15c67c422ab446672d19f8a327, UNASSIGN 2024-12-05T01:02:26,790 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=32b2bae45319cdf8a32d24c50dad228f, regionState=CLOSING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:02:26,790 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=0b389d15c67c422ab446672d19f8a327, regionState=CLOSING, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:02:26,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=32b2bae45319cdf8a32d24c50dad228f, UNASSIGN because future has completed 2024-12-05T01:02:26,792 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:02:26,792 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 32b2bae45319cdf8a32d24c50dad228f, server=2113c16e5528,41191,1733360288910}] 2024-12-05T01:02:26,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0b389d15c67c422ab446672d19f8a327, UNASSIGN because future has completed 2024-12-05T01:02:26,793 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:02:26,793 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0b389d15c67c422ab446672d19f8a327, server=2113c16e5528,44435,1733360288855}] 2024-12-05T01:02:26,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-05T01:02:26,944 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(122): Close 0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:26,944 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(122): Close 32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:26,944 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:02:26,944 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(136): Unassign region: 
split region: false: evictCache: false 2024-12-05T01:02:26,945 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1722): Closing 0b389d15c67c422ab446672d19f8a327, disabling compactions & flushes 2024-12-05T01:02:26,945 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1722): Closing 32b2bae45319cdf8a32d24c50dad228f, disabling compactions & flushes 2024-12-05T01:02:26,945 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 2024-12-05T01:02:26,945 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. 2024-12-05T01:02:26,945 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 2024-12-05T01:02:26,945 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. 2024-12-05T01:02:26,945 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. after waiting 0 ms 2024-12-05T01:02:26,945 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. after waiting 0 ms 2024-12-05T01:02:26,945 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 2024-12-05T01:02:26,945 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. 
2024-12-05T01:02:26,950 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:02:26,950 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:02:26,950 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:02:26,950 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:02:26,950 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f. 2024-12-05T01:02:26,950 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1676): Region close journal for 32b2bae45319cdf8a32d24c50dad228f: Waiting for close lock at 1733360546945Running coprocessor pre-close hooks at 1733360546945Disabling compacts and flushes for region at 1733360546945Disabling writes for close at 1733360546945Writing region close event to WAL at 1733360546946 (+1 ms)Running coprocessor post-close hooks at 1733360546950 (+4 ms)Closed at 1733360546950 2024-12-05T01:02:26,950 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327. 
2024-12-05T01:02:26,950 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1676): Region close journal for 0b389d15c67c422ab446672d19f8a327: Waiting for close lock at 1733360546945Running coprocessor pre-close hooks at 1733360546945Disabling compacts and flushes for region at 1733360546945Disabling writes for close at 1733360546945Writing region close event to WAL at 1733360546946 (+1 ms)Running coprocessor post-close hooks at 1733360546950 (+4 ms)Closed at 1733360546950 2024-12-05T01:02:26,952 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(157): Closed 0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:26,953 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=0b389d15c67c422ab446672d19f8a327, regionState=CLOSED 2024-12-05T01:02:26,953 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(157): Closed 32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:26,953 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=32b2bae45319cdf8a32d24c50dad228f, regionState=CLOSED 2024-12-05T01:02:26,954 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0b389d15c67c422ab446672d19f8a327, server=2113c16e5528,44435,1733360288855 because future has completed 2024-12-05T01:02:26,955 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=166, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 32b2bae45319cdf8a32d24c50dad228f, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T01:02:26,957 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=164 2024-12-05T01:02:26,957 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=164, state=SUCCESS, hasLock=false; CloseRegionProcedure 0b389d15c67c422ab446672d19f8a327, server=2113c16e5528,44435,1733360288855 in 162 msec 2024-12-05T01:02:26,958 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=165 2024-12-05T01:02:26,958 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, state=SUCCESS, hasLock=false; CloseRegionProcedure 32b2bae45319cdf8a32d24c50dad228f, server=2113c16e5528,41191,1733360288910 in 164 msec 2024-12-05T01:02:26,958 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0b389d15c67c422ab446672d19f8a327, UNASSIGN in 168 msec 2024-12-05T01:02:26,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=165, resume processing ppid=163 2024-12-05T01:02:26,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=32b2bae45319cdf8a32d24c50dad228f, UNASSIGN in 169 msec 2024-12-05T01:02:26,961 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-12-05T01:02:26,961 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 172 msec 2024-12-05T01:02:26,963 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360546963"}]},"ts":"1733360546963"} 2024-12-05T01:02:26,964 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-05T01:02:26,965 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-05T01:02:26,967 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 182 msec 2024-12-05T01:02:27,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-05T01:02:27,102 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-05T01:02:27,103 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,104 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,105 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=168, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,107 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,109 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:27,109 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:27,112 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327/recovered.edits] 2024-12-05T01:02:27,112 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f/recovered.edits] 2024-12-05T01:02:27,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,115 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-05T01:02:27,115 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-05T01:02:27,115 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-05T01:02:27,116 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-05T01:02:27,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:27,117 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f/cf/a976300f918444aaa149e735b14665af to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f/cf/a976300f918444aaa149e735b14665af 2024-12-05T01:02:27,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:27,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:27,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:27,118 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327/cf/963a1afc8f844490ad1c9e958efd44b1 to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327/cf/963a1afc8f844490ad1c9e958efd44b1 2024-12-05T01:02:27,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-12-05T01:02:27,121 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f/recovered.edits/9.seqid 2024-12-05T01:02:27,122 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/32b2bae45319cdf8a32d24c50dad228f 2024-12-05T01:02:27,122 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327/recovered.edits/9.seqid 2024-12-05T01:02:27,123 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b389d15c67c422ab446672d19f8a327 2024-12-05T01:02:27,123 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-05T01:02:27,125 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=168, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,128 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-05T01:02:27,130 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-05T01:02:27,132 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=168, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,132 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-12-05T01:02:27,132 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360547132"}]},"ts":"9223372036854775807"} 2024-12-05T01:02:27,132 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360547132"}]},"ts":"9223372036854775807"} 2024-12-05T01:02:27,134 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T01:02:27,134 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 0b389d15c67c422ab446672d19f8a327, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733360524023.0b389d15c67c422ab446672d19f8a327.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 32b2bae45319cdf8a32d24c50dad228f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733360524023.32b2bae45319cdf8a32d24c50dad228f.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T01:02:27,134 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
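Editor's annotation (not part of the captured log): the entries above and immediately below record the master-side DisableTableProcedure (pid=162) and DeleteTableProcedure (pid=168) flow plus the snapshot cleanup that the test client triggered. A minimal client-side sketch that would produce this sequence is shown here for orientation only; the Configuration/Connection setup is assumed, since the log does not show it, and only the table/snapshot names are taken from the log itself.

// Hedged sketch, not the test's actual code: reproduces the Admin calls recorded in the
// surrounding log (disable, delete, snapshot cleanup). Connection setup is an assumption.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropMergeRegionTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.disableTable(table);   // corresponds to DisableTableProcedure pid=162 above
      admin.deleteTable(table);    // corresponds to DeleteTableProcedure pid=168 above/below
      // snapshot deletions recorded just below in the log:
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1");
    }
  }
}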
2024-12-05T01:02:27,134 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733360547134"}]},"ts":"9223372036854775807"} 2024-12-05T01:02:27,136 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-05T01:02:27,137 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=168, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,138 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 34 msec 2024-12-05T01:02:27,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-12-05T01:02:27,222 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,222 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-05T01:02:27,230 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-05T01:02:27,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,233 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-05T01:02:27,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:27,236 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-12-05T01:02:27,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:27,265 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=812 (was 801) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34543 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-20 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:54324 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38275 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_840982678_1 at /127.0.0.1:42914 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:51552 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:42936 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 1110) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_840982678_1 at /127.0.0.1:51514 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5750 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:38275 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=813 (was 799) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=630 (was 683), ProcessCount=17 (was 17), AvailableMemoryMB=5209 (was 5407) 2024-12-05T01:02:27,265 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=812 is superior to 500 2024-12-05T01:02:27,288 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=812, OpenFileDescriptor=813, MaxFileDescriptor=1048576, SystemLoadAverage=630, ProcessCount=17, AvailableMemoryMB=5205 2024-12-05T01:02:27,288 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=812 is superior to 500 2024-12-05T01:02:27,290 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T01:02:27,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-05T01:02:27,292 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T01:02:27,292 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:27,292 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 169 2024-12-05T01:02:27,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-05T01:02:27,293 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T01:02:27,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is 
added to blk_1073742209_1385 (size=407) 2024-12-05T01:02:27,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742209_1385 (size=407) 2024-12-05T01:02:27,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742209_1385 (size=407) 2024-12-05T01:02:27,303 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 959b26dc683fadc597923b2691f756bb, NAME => 'testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:27,303 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ff9f2be2a5b6cef2eafb0420a3da7357, NAME => 'testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:27,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742210_1386 (size=68) 2024-12-05T01:02:27,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742210_1386 (size=68) 2024-12-05T01:02:27,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742210_1386 (size=68) 2024-12-05T01:02:27,312 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:27,313 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 959b26dc683fadc597923b2691f756bb, disabling compactions & flushes 2024-12-05T01:02:27,313 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. 
2024-12-05T01:02:27,313 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. 2024-12-05T01:02:27,313 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. after waiting 0 ms 2024-12-05T01:02:27,313 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. 2024-12-05T01:02:27,313 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. 2024-12-05T01:02:27,313 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 959b26dc683fadc597923b2691f756bb: Waiting for close lock at 1733360547313Disabling compacts and flushes for region at 1733360547313Disabling writes for close at 1733360547313Writing region close event to WAL at 1733360547313Closed at 1733360547313 2024-12-05T01:02:27,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742211_1387 (size=68) 2024-12-05T01:02:27,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742211_1387 (size=68) 2024-12-05T01:02:27,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742211_1387 (size=68) 2024-12-05T01:02:27,319 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:27,319 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing ff9f2be2a5b6cef2eafb0420a3da7357, disabling compactions & flushes 2024-12-05T01:02:27,319 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. 2024-12-05T01:02:27,319 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. 2024-12-05T01:02:27,320 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. after waiting 0 ms 2024-12-05T01:02:27,320 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. 2024-12-05T01:02:27,320 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. 
2024-12-05T01:02:27,320 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for ff9f2be2a5b6cef2eafb0420a3da7357: Waiting for close lock at 1733360547319Disabling compacts and flushes for region at 1733360547319Disabling writes for close at 1733360547320 (+1 ms)Writing region close event to WAL at 1733360547320Closed at 1733360547320 2024-12-05T01:02:27,321 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T01:02:27,321 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733360547321"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360547321"}]},"ts":"1733360547321"} 2024-12-05T01:02:27,321 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733360547321"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360547321"}]},"ts":"1733360547321"} 2024-12-05T01:02:27,325 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T01:02:27,326 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T01:02:27,326 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360547326"}]},"ts":"1733360547326"} 2024-12-05T01:02:27,328 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-05T01:02:27,329 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {2113c16e5528=0} racks are {/default-rack=0} 2024-12-05T01:02:27,330 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T01:02:27,330 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T01:02:27,330 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T01:02:27,330 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T01:02:27,330 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T01:02:27,330 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T01:02:27,330 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T01:02:27,330 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T01:02:27,330 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T01:02:27,330 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T01:02:27,330 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=959b26dc683fadc597923b2691f756bb, ASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ff9f2be2a5b6cef2eafb0420a3da7357, ASSIGN}] 2024-12-05T01:02:27,331 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=959b26dc683fadc597923b2691f756bb, ASSIGN 2024-12-05T01:02:27,331 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ff9f2be2a5b6cef2eafb0420a3da7357, ASSIGN 2024-12-05T01:02:27,332 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=959b26dc683fadc597923b2691f756bb, ASSIGN; state=OFFLINE, location=2113c16e5528,44435,1733360288855; forceNewPlan=false, retain=false 2024-12-05T01:02:27,333 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ff9f2be2a5b6cef2eafb0420a3da7357, ASSIGN; state=OFFLINE, location=2113c16e5528,41191,1733360288910; forceNewPlan=false, retain=false 2024-12-05T01:02:27,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-05T01:02:27,482 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
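Editor's annotation (not part of the captured log): the CreateTableProcedure (pid=169) and the two ASSIGN subprocedures (pids 170/171) above correspond to a single createTable call with one 'cf' column family (VERSIONS => '1', REGION_REPLICATION => '1') and a split key of '1', which yields the two regions ''..'1' and '1'..'' seen in the log. A hedged client-side sketch of that call follows; connection setup is assumed and not shown in the log.

// Hedged sketch, not the test's actual code: builds a descriptor mirroring the attributes
// logged above and issues the createTable call that the master records as pid=169.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExpiredSnapshotTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setRegionReplication(1)                       // REGION_REPLICATION => '1'
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                         // VERSIONS => '1'
              .build());
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };  // two regions: ''..'1', '1'..''
      admin.createTable(td.build(), splitKeys);          // master stores pid=169, then ASSIGNs 170/171
    }
  }
}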
2024-12-05T01:02:27,483 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=ff9f2be2a5b6cef2eafb0420a3da7357, regionState=OPENING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:02:27,483 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=959b26dc683fadc597923b2691f756bb, regionState=OPENING, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:02:27,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ff9f2be2a5b6cef2eafb0420a3da7357, ASSIGN because future has completed 2024-12-05T01:02:27,485 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure ff9f2be2a5b6cef2eafb0420a3da7357, server=2113c16e5528,41191,1733360288910}] 2024-12-05T01:02:27,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=959b26dc683fadc597923b2691f756bb, ASSIGN because future has completed 2024-12-05T01:02:27,486 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure 959b26dc683fadc597923b2691f756bb, server=2113c16e5528,44435,1733360288855}] 2024-12-05T01:02:27,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-05T01:02:27,641 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. 2024-12-05T01:02:27,641 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. 2024-12-05T01:02:27,641 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7752): Opening region: {ENCODED => 959b26dc683fadc597923b2691f756bb, NAME => 'testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T01:02:27,641 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7752): Opening region: {ENCODED => ff9f2be2a5b6cef2eafb0420a3da7357, NAME => 'testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T01:02:27,642 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. service=AccessControlService 2024-12-05T01:02:27,642 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. 
service=AccessControlService 2024-12-05T01:02:27,642 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T01:02:27,642 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T01:02:27,642 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:27,642 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:27,642 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:27,642 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:27,642 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7794): checking encryption for ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:27,642 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7797): checking classloading for ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:27,642 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7794): checking encryption for 959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:27,642 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7797): checking classloading for 959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:27,644 INFO [StoreOpener-959b26dc683fadc597923b2691f756bb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:27,645 INFO [StoreOpener-ff9f2be2a5b6cef2eafb0420a3da7357-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:27,645 INFO [StoreOpener-959b26dc683fadc597923b2691f756bb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); 
files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 959b26dc683fadc597923b2691f756bb columnFamilyName cf 2024-12-05T01:02:27,646 DEBUG [StoreOpener-959b26dc683fadc597923b2691f756bb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:27,646 INFO [StoreOpener-959b26dc683fadc597923b2691f756bb-1 {}] regionserver.HStore(327): Store=959b26dc683fadc597923b2691f756bb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:02:27,646 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1038): replaying wal for 959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:27,647 INFO [StoreOpener-ff9f2be2a5b6cef2eafb0420a3da7357-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ff9f2be2a5b6cef2eafb0420a3da7357 columnFamilyName cf 2024-12-05T01:02:27,647 DEBUG [StoreOpener-ff9f2be2a5b6cef2eafb0420a3da7357-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:27,647 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:27,648 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:27,649 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1048): stopping wal replay for 959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:27,649 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1060): Cleaning up temporary data for 959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:27,649 INFO [StoreOpener-ff9f2be2a5b6cef2eafb0420a3da7357-1 {}] regionserver.HStore(327): 
Store=ff9f2be2a5b6cef2eafb0420a3da7357/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:02:27,649 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1038): replaying wal for ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:27,650 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:27,650 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1093): writing seq id for 959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:27,651 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:27,651 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1048): stopping wal replay for ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:27,651 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1060): Cleaning up temporary data for ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:27,653 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1093): writing seq id for ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:27,653 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:02:27,653 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1114): Opened 959b26dc683fadc597923b2691f756bb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72803081, jitterRate=0.08485044538974762}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:02:27,653 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:27,654 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1006): Region open journal for 959b26dc683fadc597923b2691f756bb: Running coprocessor pre-open hook at 1733360547642Writing region info on filesystem at 1733360547642Initializing all the Stores at 1733360547643 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '65536 B (64KB)'} at 1733360547643Cleaning up temporary data from old regions at 1733360547649 (+6 ms)Running coprocessor post-open hooks at 1733360547654 (+5 ms)Region opened successfully at 1733360547654 2024-12-05T01:02:27,656 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:02:27,657 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb., pid=173, masterSystemTime=1733360547638 2024-12-05T01:02:27,657 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1114): Opened ff9f2be2a5b6cef2eafb0420a3da7357; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61601361, jitterRate=-0.08206818997859955}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:02:27,657 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:27,657 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1006): Region open journal for ff9f2be2a5b6cef2eafb0420a3da7357: Running coprocessor pre-open hook at 1733360547642Writing region info on filesystem at 1733360547642Initializing all the Stores at 1733360547643 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360547643Cleaning up temporary data from old regions at 1733360547651 (+8 ms)Running coprocessor post-open hooks at 1733360547657 (+6 ms)Region opened successfully at 1733360547657 2024-12-05T01:02:27,658 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357., pid=172, masterSystemTime=1733360547637 2024-12-05T01:02:27,659 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. 2024-12-05T01:02:27,659 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. 
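Illustrative sketch (not part of the captured log): at this point both regions of testtb-testExportExpiredSnapshot (959b26dc683fadc597923b2691f756bb and ff9f2be2a5b6cef2eafb0420a3da7357) have been opened on servers 2113c16e5528,44435 and 2113c16e5528,41191. A minimal, hedged Java sketch of how a client could list where the regions landed via the public RegionLocator API; the ZooKeeper host and port are taken from the quorum string that appears in the log, while the class name and output format are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionLocations {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // quorum host from the log
    conf.set("hbase.zookeeper.property.clientPort", "56893"); // port seen in the ZKWatcher lines
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      // One line per region: encoded name plus the region server currently hosting it.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}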
2024-12-05T01:02:27,660 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=959b26dc683fadc597923b2691f756bb, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:02:27,661 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. 2024-12-05T01:02:27,661 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. 2024-12-05T01:02:27,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure 959b26dc683fadc597923b2691f756bb, server=2113c16e5528,44435,1733360288855 because future has completed 2024-12-05T01:02:27,662 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=ff9f2be2a5b6cef2eafb0420a3da7357, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:02:27,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure ff9f2be2a5b6cef2eafb0420a3da7357, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T01:02:27,677 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-12-05T01:02:27,677 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; OpenRegionProcedure 959b26dc683fadc597923b2691f756bb, server=2113c16e5528,44435,1733360288855 in 179 msec 2024-12-05T01:02:27,679 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-12-05T01:02:27,679 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; OpenRegionProcedure ff9f2be2a5b6cef2eafb0420a3da7357, server=2113c16e5528,41191,1733360288910 in 191 msec 2024-12-05T01:02:27,680 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=959b26dc683fadc597923b2691f756bb, ASSIGN in 347 msec 2024-12-05T01:02:27,681 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=171, resume processing ppid=169 2024-12-05T01:02:27,681 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ff9f2be2a5b6cef2eafb0420a3da7357, ASSIGN in 349 msec 2024-12-05T01:02:27,682 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T01:02:27,682 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360547682"}]},"ts":"1733360547682"} 
2024-12-05T01:02:27,684 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-05T01:02:27,685 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T01:02:27,686 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-05T01:02:27,689 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-05T01:02:27,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:27,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:27,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:27,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:27,695 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:27,695 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:27,695 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:27,695 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:27,697 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 405 msec 2024-12-05T01:02:27,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-05T01:02:27,922 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 
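Illustrative sketch (not part of the captured log): CreateTableProcedure pid=169 has just finished and the client observes "Operation: CREATE ... completed". A hedged sketch of how an equivalent table could be created through the public Admin API: one 'cf' column family with a single version (matching the descriptor logged during region open) and a pre-split at row key '1' so the two regions cover the STARTKEY/ENDKEY pairs shown above. Connection settings and the class name are assumptions.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Single column family 'cf' with VERSIONS => 1, as in the descriptor logged during region open.
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build())
          .build();
      // Pre-split at '1' so the regions cover ['', '1') and ['1', '') as shown above.
      byte[][] splitKeys = { Bytes.toBytes("1") };
      admin.createTable(desc, splitKeys);
    }
  }
}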
2024-12-05T01:02:27,922 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-05T01:02:27,922 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:02:27,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-05T01:02:27,927 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:02:27,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-12-05T01:02:27,928 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T01:02:27,931 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-05T01:02:27,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360547931 (current time:1733360547931). 2024-12-05T01:02:27,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:02:27,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-05T01:02:27,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:02:27,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2279fa28, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:27,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:27,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:27,933 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:27,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:27,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:27,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a21c5b4, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:27,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:27,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:27,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:27,935 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39982, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:27,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@261a902c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:27,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:27,936 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:27,937 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:27,938 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49378, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:02:27,939 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 
2024-12-05T01:02:27,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:27,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:27,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:27,939 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:02:27,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@768fdb93, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:27,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:27,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:27,941 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:27,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:27,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:27,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c443bd5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:27,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:27,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:27,944 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:27,944 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40008, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:27,945 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44add990, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:27,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:27,946 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:27,947 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:27,948 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49394, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:02:27,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:02:27,951 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 
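Illustrative sketch (not part of the captured log): while validating the snapshot request the master reads the table ACL from hbase:acl (PermissionStorage) and finds "jenkins: RWXCA". A hedged sketch of how the same permissions could be listed from a client, assuming the AccessController coprocessor is enabled as it is in this test; the class name is hypothetical.

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ShowTableAcls {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Reads the hbase:acl entries for the table -- the same storage the master
      // consults (PermissionStorage) while validating the snapshot request.
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, "testtb-testExportExpiredSnapshot");
      for (UserPermission p : perms) {
        System.out.println(p); // expected to include an entry like: jenkins: RWXCA
      }
    }
  }
}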
2024-12-05T01:02:27,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:27,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:27,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:27,952 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:02:27,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-05T01:02:27,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
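Illustrative sketch (not part of the captured log): SnapshotManager has accepted the request ("No existing snapshot, attempting snapshot...") for emptySnaptb0-testExportExpiredSnapshot, a FLUSH-type snapshot with ttl=0. A hedged sketch of issuing the same kind of request through the public Admin API; the class name is an assumption, and the call returns once the master-side SnapshotProcedure has finished.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeEmptySnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot, matching "type=FLUSH ttl=0" in the request above.
      admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          SnapshotType.FLUSH);
    }
  }
}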
2024-12-05T01:02:27,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-05T01:02:27,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-05T01:02:27,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-05T01:02:27,955 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:02:27,956 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:02:27,959 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:02:27,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742212_1388 (size=170) 2024-12-05T01:02:27,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742212_1388 (size=170) 2024-12-05T01:02:27,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742212_1388 (size=170) 2024-12-05T01:02:27,976 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:02:27,977 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 959b26dc683fadc597923b2691f756bb}, {pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff9f2be2a5b6cef2eafb0420a3da7357}] 2024-12-05T01:02:27,978 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:27,978 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:28,061 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-05T01:02:28,130 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41191 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=176 2024-12-05T01:02:28,130 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44435 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=175 2024-12-05T01:02:28,130 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. 2024-12-05T01:02:28,130 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. 2024-12-05T01:02:28,130 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.HRegion(2603): Flush status journal for 959b26dc683fadc597923b2691f756bb: 2024-12-05T01:02:28,130 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.HRegion(2603): Flush status journal for ff9f2be2a5b6cef2eafb0420a3da7357: 2024-12-05T01:02:28,130 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-05T01:02:28,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-05T01:02:28,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-05T01:02:28,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-05T01:02:28,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:28,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:28,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:02:28,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:02:28,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742213_1389 (size=71) 2024-12-05T01:02:28,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742213_1389 (size=71) 2024-12-05T01:02:28,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742213_1389 (size=71) 2024-12-05T01:02:28,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742214_1390 (size=71) 2024-12-05T01:02:28,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742214_1390 (size=71) 2024-12-05T01:02:28,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742214_1390 (size=71) 2024-12-05T01:02:28,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-05T01:02:28,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-05T01:02:28,304 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-05T01:02:28,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T01:02:28,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T01:02:28,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. 
2024-12-05T01:02:28,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. 2024-12-05T01:02:28,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=175 2024-12-05T01:02:28,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-05T01:02:28,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=176 2024-12-05T01:02:28,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=175 2024-12-05T01:02:28,542 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:28,542 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:28,543 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:28,543 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:28,544 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 959b26dc683fadc597923b2691f756bb in 566 msec 2024-12-05T01:02:28,545 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=176, resume processing ppid=174 2024-12-05T01:02:28,545 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ff9f2be2a5b6cef2eafb0420a3da7357 in 566 msec 2024-12-05T01:02:28,545 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:02:28,546 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:02:28,546 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 
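Illustrative sketch (not part of the captured log): both SnapshotRegionProcedures (pid=175, pid=176) have finished and the SnapshotProcedure moves on to consolidating and verifying the manifest. A hedged sketch of how a client could confirm afterwards that the completed snapshot is visible; the class name and printed message are assumptions.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class VerifySnapshotExists {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // After SNAPSHOT_COMPLETE_SNAPSHOT the snapshot directory is moved out of
      // .hbase-snapshot/.tmp and the snapshot appears in the completed listing.
      boolean present = admin.listSnapshots().stream()
          .anyMatch(sd -> "emptySnaptb0-testExportExpiredSnapshot".equals(sd.getName()));
      System.out.println("snapshot present: " + present);
    }
  }
}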
2024-12-05T01:02:28,546 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-05T01:02:28,547 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-05T01:02:28,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742215_1391 (size=552) 2024-12-05T01:02:28,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742215_1391 (size=552) 2024-12-05T01:02:28,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742215_1391 (size=552) 2024-12-05T01:02:28,557 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:02:28,561 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:02:28,561 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-05T01:02:28,563 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:02:28,563 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-05T01:02:28,564 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 610 msec 2024-12-05T01:02:28,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-05T01:02:28,581 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-05T01:02:28,585 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', 
row='02d075e3dc99b961fc9996c6c05bc4d48', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb., hostname=2113c16e5528,44435,1733360288855, seqNum=2] 2024-12-05T01:02:28,586 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='158dcec11fa0433392b428bf8cb485f7f', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:02:28,587 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='2524ba83d6a74f151c1e2d369fd890205', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:02:28,588 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='3d34b47620c31d00b0d3f81d150529374', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:02:28,589 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='40875a251567e8edc3997f68a35dc5dda', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:02:28,591 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44435 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:02:28,593 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41191 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:02:28,595 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T01:02:28,597 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-05T01:02:28,597 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. 
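Illustrative sketch (not part of the captured log): the "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above are what the region server emits when a mutation arrives with durability SKIP_WAL. A hedged sketch of a Put issued that way; the row key, qualifier, and value are hypothetical.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithoutWal {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      Put put = new Put(Bytes.toBytes("0row"))                                    // hypothetical row key
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")) // hypothetical qualifier/value
          .setDurability(Durability.SKIP_WAL); // skips the WAL; the server logs the "Data may be lost" warning
      table.put(put);
    }
  }
}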
2024-12-05T01:02:28,597 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:02:28,599 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T01:02:28,603 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T01:02:28,607 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T01:02:28,610 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-05T01:02:28,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360548610 (current time:1733360548610). 2024-12-05T01:02:28,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:02:28,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-05T01:02:28,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:02:28,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f68f656, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:28,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:28,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:28,611 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:28,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:28,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:28,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c66f497, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-05T01:02:28,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:28,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:28,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:28,613 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40038, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:28,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@213c149a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:28,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:28,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:28,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:28,615 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49396, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:02:28,616 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 
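Illustrative sketch (not part of the captured log): a second FLUSH-type snapshot, snaptb0-testExportExpiredSnapshot, is now being requested; the test name suggests this snapshot is later exported. A hedged sketch of driving the ExportSnapshot tool programmatically, using the -snapshot / -copy-to flags as documented in the HBase reference guide; the destination URI is hypothetical and the mapper count is left at its default.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Equivalent to the documented CLI form:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //       -snapshot snaptb0-testExportExpiredSnapshot -copy-to hdfs://backup-cluster/hbase
    // The destination URI below is hypothetical.
    int exit = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportExpiredSnapshot",
        "-copy-to", "hdfs://backup-cluster/hbase"
    });
    System.exit(exit);
  }
}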
2024-12-05T01:02:28,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:28,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:28,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:28,616 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:02:28,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bba1cb2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:28,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:28,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:28,617 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:28,617 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:28,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:28,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ce6e737, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:28,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:28,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:28,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:28,619 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40062, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:28,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26209bb4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:28,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:28,620 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:28,620 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:28,621 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49406, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:02:28,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:02:28,624 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 
2024-12-05T01:02:28,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:28,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:28,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:28,624 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:02:28,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-05T01:02:28,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
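The call stacks above belong to the short-lived connections the master opens while validating the snapshot description: SnapshotDescriptionUtils.isSecurityAvailable checks that the hbase:acl table is usable, and writeAclToSnapshotDescription then reads the table's permissions, which show up here as the entry [jenkins: RWXCA]. A rough client-side equivalent of that lookup, assuming the AccessControlClient helper from the security module, might look like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ShowTableAclsSketch {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Lists the per-user permissions stored in hbase:acl for the table,
          // e.g. the "jenkins: RWXCA" entry the master reads above.
          for (UserPermission perm : AccessControlClient.getUserPermissions(
              conn, "testtb-testExportExpiredSnapshot")) {
            System.out.println(perm);
          }
        }
      }
    }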
2024-12-05T01:02:28,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-05T01:02:28,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-05T01:02:28,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-05T01:02:28,627 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:02:28,628 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:02:28,629 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:02:28,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742216_1392 (size=165) 2024-12-05T01:02:28,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742216_1392 (size=165) 2024-12-05T01:02:28,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742216_1392 (size=165) 2024-12-05T01:02:28,636 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:02:28,636 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 959b26dc683fadc597923b2691f756bb}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff9f2be2a5b6cef2eafb0420a3da7357}] 2024-12-05T01:02:28,637 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:28,637 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:28,731 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-05T01:02:28,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44435 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-12-05T01:02:28,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41191 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-12-05T01:02:28,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. 2024-12-05T01:02:28,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. 2024-12-05T01:02:28,789 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2902): Flushing 959b26dc683fadc597923b2691f756bb 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-05T01:02:28,789 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2902): Flushing ff9f2be2a5b6cef2eafb0420a3da7357 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-05T01:02:28,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357/.tmp/cf/575af66a2d204841adedc7acc8a3d641 is 71, key is 12f0305ec2eb8a7f692d77c040bd7c3d/cf:q/1733360548593/Put/seqid=0 2024-12-05T01:02:28,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb/.tmp/cf/dc7ff33f86e84ccdbc5850026983269a is 71, key is 055b7712c91e39142391b1169afd0362/cf:q/1733360548591/Put/seqid=0 2024-12-05T01:02:28,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742218_1394 (size=5216) 2024-12-05T01:02:28,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742218_1394 (size=5216) 2024-12-05T01:02:28,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742218_1394 (size=5216) 2024-12-05T01:02:28,818 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb/.tmp/cf/dc7ff33f86e84ccdbc5850026983269a 2024-12-05T01:02:28,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742217_1393 (size=8392) 2024-12-05T01:02:28,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742217_1393 (size=8392) 2024-12-05T01:02:28,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742217_1393 (size=8392) 2024-12-05T01:02:28,825 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357/.tmp/cf/575af66a2d204841adedc7acc8a3d641 2024-12-05T01:02:28,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb/.tmp/cf/dc7ff33f86e84ccdbc5850026983269a as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb/cf/dc7ff33f86e84ccdbc5850026983269a 2024-12-05T01:02:28,830 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb/cf/dc7ff33f86e84ccdbc5850026983269a, entries=2, sequenceid=6, filesize=5.1 K 2024-12-05T01:02:28,831 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357/.tmp/cf/575af66a2d204841adedc7acc8a3d641 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357/cf/575af66a2d204841adedc7acc8a3d641 2024-12-05T01:02:28,833 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 959b26dc683fadc597923b2691f756bb in 44ms, sequenceid=6, compaction requested=false 2024-12-05T01:02:28,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-05T01:02:28,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for 959b26dc683fadc597923b2691f756bb: 2024-12-05T01:02:28,833 
DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. for snaptb0-testExportExpiredSnapshot completed. 2024-12-05T01:02:28,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-05T01:02:28,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:28,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb/cf/dc7ff33f86e84ccdbc5850026983269a] hfiles 2024-12-05T01:02:28,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb/cf/dc7ff33f86e84ccdbc5850026983269a for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-05T01:02:28,836 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357/cf/575af66a2d204841adedc7acc8a3d641, entries=48, sequenceid=6, filesize=8.2 K 2024-12-05T01:02:28,837 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for ff9f2be2a5b6cef2eafb0420a3da7357 in 48ms, sequenceid=6, compaction requested=false 2024-12-05T01:02:28,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for ff9f2be2a5b6cef2eafb0420a3da7357: 2024-12-05T01:02:28,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. for snaptb0-testExportExpiredSnapshot completed. 2024-12-05T01:02:28,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-05T01:02:28,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:28,838 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357/cf/575af66a2d204841adedc7acc8a3d641] hfiles 2024-12-05T01:02:28,838 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357/cf/575af66a2d204841adedc7acc8a3d641 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-05T01:02:28,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742219_1395 (size=110) 2024-12-05T01:02:28,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742219_1395 (size=110) 2024-12-05T01:02:28,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742219_1395 (size=110) 2024-12-05T01:02:28,840 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. 
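Because this is a FLUSH-type snapshot, each SnapshotRegionProcedure above first flushes the region's memstore to a new HFile under .tmp/cf and commits it before recording a reference to that file in the snapshot manifest. The same flush can be requested explicitly from a client; a minimal sketch, with the table name taken from the log and everything else assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flushes all regions of the table, producing store files like the
          // cf/... HFiles committed in the entries above.
          admin.flush(TableName.valueOf("testtb-testExportExpiredSnapshot"));
        }
      }
    }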
2024-12-05T01:02:28,840 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-05T01:02:28,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-12-05T01:02:28,841 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:28,841 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:28,843 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 959b26dc683fadc597923b2691f756bb in 206 msec 2024-12-05T01:02:28,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742220_1396 (size=110) 2024-12-05T01:02:28,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742220_1396 (size=110) 2024-12-05T01:02:28,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742220_1396 (size=110) 2024-12-05T01:02:28,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. 
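Region 959b26dc683fadc597923b2691f756bb has finished its sub-procedure here, and ff9f2be2a5b6cef2eafb0420a3da7357 follows in the next entries; once the parent SnapshotProcedure reaches SUCCESS a few entries below, the snapshot becomes visible to clients. A hedged verification sketch, with only the snapshot name taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class VerifySnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Lists completed snapshots; snaptb0-testExportExpiredSnapshot should
          // appear once the procedure below finishes with state=SUCCESS.
          for (SnapshotDescription sd : admin.listSnapshots()) {
            if ("snaptb0-testExportExpiredSnapshot".equals(sd.getName())) {
              System.out.println("snapshot present: " + sd);
            }
          }
        }
      }
    }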
2024-12-05T01:02:28,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-12-05T01:02:28,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-12-05T01:02:28,845 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:28,845 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:28,848 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=177 2024-12-05T01:02:28,848 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ff9f2be2a5b6cef2eafb0420a3da7357 in 210 msec 2024-12-05T01:02:28,848 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:02:28,849 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:02:28,850 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:02:28,850 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-05T01:02:28,850 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-05T01:02:28,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742221_1397 (size=630) 2024-12-05T01:02:28,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742221_1397 (size=630) 2024-12-05T01:02:28,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742221_1397 (size=630) 2024-12-05T01:02:28,859 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:02:28,863 INFO 
[PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:02:28,863 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-05T01:02:28,864 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:02:28,864 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-05T01:02:28,865 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 239 msec 2024-12-05T01:02:28,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-05T01:02:28,942 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-05T01:02:28,943 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T01:02:28,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-05T01:02:28,945 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T01:02:28,945 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:28,945 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 180 2024-12-05T01:02:28,946 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-05T01:02:28,946 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T01:02:28,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742222_1398 (size=400) 2024-12-05T01:02:28,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742222_1398 (size=400) 2024-12-05T01:02:28,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742222_1398 (size=400) 2024-12-05T01:02:28,954 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 880916148c83f94d943041b7127cffb7, NAME => 'testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:28,954 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 8bbf90773c039aafe370b8325f074c96, NAME => 'testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:28,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742223_1399 (size=61) 2024-12-05T01:02:28,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742224_1400 (size=61) 2024-12-05T01:02:28,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742223_1399 (size=61) 2024-12-05T01:02:28,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742223_1399 (size=61) 2024-12-05T01:02:28,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742224_1400 (size=61) 2024-12-05T01:02:28,961 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742224_1400 (size=61) 2024-12-05T01:02:28,961 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:28,961 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:28,961 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 880916148c83f94d943041b7127cffb7, disabling compactions & flushes 2024-12-05T01:02:28,961 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 8bbf90773c039aafe370b8325f074c96, disabling compactions & flushes 2024-12-05T01:02:28,961 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. 2024-12-05T01:02:28,961 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. 2024-12-05T01:02:28,961 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. 2024-12-05T01:02:28,961 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. 2024-12-05T01:02:28,961 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. after waiting 0 ms 2024-12-05T01:02:28,961 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. 2024-12-05T01:02:28,961 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. after waiting 0 ms 2024-12-05T01:02:28,962 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. 2024-12-05T01:02:28,962 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. 2024-12-05T01:02:28,962 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. 
2024-12-05T01:02:28,962 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 880916148c83f94d943041b7127cffb7: Waiting for close lock at 1733360548961Disabling compacts and flushes for region at 1733360548961Disabling writes for close at 1733360548961Writing region close event to WAL at 1733360548961Closed at 1733360548962 (+1 ms) 2024-12-05T01:02:28,962 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 8bbf90773c039aafe370b8325f074c96: Waiting for close lock at 1733360548961Disabling compacts and flushes for region at 1733360548961Disabling writes for close at 1733360548961Writing region close event to WAL at 1733360548962 (+1 ms)Closed at 1733360548962 2024-12-05T01:02:28,963 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T01:02:28,963 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733360548963"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360548963"}]},"ts":"1733360548963"} 2024-12-05T01:02:28,963 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733360548963"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360548963"}]},"ts":"1733360548963"} 2024-12-05T01:02:28,965 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
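The entries above show CreateTableProcedure building testExportExpiredSnapshot with a single column family cf and two regions split at row key '1', then adding both region rows to hbase:meta. Below is a client-side sketch that would request an equivalent pre-split table; the column-family attribute mirrors the logged descriptor where noted, everything else is left at defaults:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreatePresplitTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
                  .build())
              .build();
          // One explicit split key gives two regions, ('', '1') and ('1', ''),
          // matching the STARTKEY/ENDKEY pairs created above.
          byte[][] splitKeys = { Bytes.toBytes("1") };
          admin.createTable(desc, splitKeys);
        }
      }
    }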
2024-12-05T01:02:28,966 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T01:02:28,966 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360548966"}]},"ts":"1733360548966"} 2024-12-05T01:02:28,967 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-05T01:02:28,967 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {2113c16e5528=0} racks are {/default-rack=0} 2024-12-05T01:02:28,968 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T01:02:28,968 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T01:02:28,968 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T01:02:28,968 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T01:02:28,968 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T01:02:28,968 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T01:02:28,968 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T01:02:28,968 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T01:02:28,968 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T01:02:28,968 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T01:02:28,969 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=880916148c83f94d943041b7127cffb7, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=8bbf90773c039aafe370b8325f074c96, ASSIGN}] 2024-12-05T01:02:28,969 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=880916148c83f94d943041b7127cffb7, ASSIGN 2024-12-05T01:02:28,969 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=8bbf90773c039aafe370b8325f074c96, ASSIGN 2024-12-05T01:02:28,970 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=8bbf90773c039aafe370b8325f074c96, ASSIGN; state=OFFLINE, location=2113c16e5528,32927,1733360288712; forceNewPlan=false, retain=false 2024-12-05T01:02:28,970 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=880916148c83f94d943041b7127cffb7, ASSIGN; state=OFFLINE, location=2113c16e5528,41191,1733360288910; forceNewPlan=false, retain=false 2024-12-05T01:02:29,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-05T01:02:29,120 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-05T01:02:29,121 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=880916148c83f94d943041b7127cffb7, regionState=OPENING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:02:29,121 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=8bbf90773c039aafe370b8325f074c96, regionState=OPENING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:02:29,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=880916148c83f94d943041b7127cffb7, ASSIGN because future has completed 2024-12-05T01:02:29,123 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=183, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure 880916148c83f94d943041b7127cffb7, server=2113c16e5528,41191,1733360288910}] 2024-12-05T01:02:29,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=8bbf90773c039aafe370b8325f074c96, ASSIGN because future has completed 2024-12-05T01:02:29,123 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8bbf90773c039aafe370b8325f074c96, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:02:29,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-05T01:02:29,277 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. 2024-12-05T01:02:29,277 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7752): Opening region: {ENCODED => 880916148c83f94d943041b7127cffb7, NAME => 'testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T01:02:29,277 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. service=AccessControlService 2024-12-05T01:02:29,277 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. 
2024-12-05T01:02:29,277 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T01:02:29,277 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7752): Opening region: {ENCODED => 8bbf90773c039aafe370b8325f074c96, NAME => 'testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T01:02:29,278 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 880916148c83f94d943041b7127cffb7 2024-12-05T01:02:29,278 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:29,278 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7794): checking encryption for 880916148c83f94d943041b7127cffb7 2024-12-05T01:02:29,278 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7797): checking classloading for 880916148c83f94d943041b7127cffb7 2024-12-05T01:02:29,278 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. service=AccessControlService 2024-12-05T01:02:29,278 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
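Each region open above registers the AccessControlService coprocessor endpoint and loads org.apache.hadoop.hbase.security.access.AccessController as a system coprocessor, which is driven by cluster configuration rather than by the table descriptor. The sketch below lists the configuration keys typically involved in enabling it; treat the exact set used by this test's mini-cluster as an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AccessControllerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Enable authorization and load AccessController on master, regionserver
        // and region, which is what makes the coprocessor lines above appear.
        conf.setBoolean("hbase.security.authorization", true);
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.regionserver.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        System.out.println(conf.get("hbase.coprocessor.region.classes"));
      }
    }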
2024-12-05T01:02:29,278 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 8bbf90773c039aafe370b8325f074c96 2024-12-05T01:02:29,278 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:29,278 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7794): checking encryption for 8bbf90773c039aafe370b8325f074c96 2024-12-05T01:02:29,278 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7797): checking classloading for 8bbf90773c039aafe370b8325f074c96 2024-12-05T01:02:29,279 INFO [StoreOpener-880916148c83f94d943041b7127cffb7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 880916148c83f94d943041b7127cffb7 2024-12-05T01:02:29,279 INFO [StoreOpener-8bbf90773c039aafe370b8325f074c96-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8bbf90773c039aafe370b8325f074c96 2024-12-05T01:02:29,280 INFO [StoreOpener-880916148c83f94d943041b7127cffb7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 880916148c83f94d943041b7127cffb7 columnFamilyName cf 2024-12-05T01:02:29,280 INFO [StoreOpener-8bbf90773c039aafe370b8325f074c96-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8bbf90773c039aafe370b8325f074c96 columnFamilyName cf 2024-12-05T01:02:29,280 DEBUG [StoreOpener-880916148c83f94d943041b7127cffb7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:29,280 DEBUG [StoreOpener-8bbf90773c039aafe370b8325f074c96-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:29,280 INFO [StoreOpener-880916148c83f94d943041b7127cffb7-1 {}] regionserver.HStore(327): Store=880916148c83f94d943041b7127cffb7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:02:29,281 INFO [StoreOpener-8bbf90773c039aafe370b8325f074c96-1 {}] regionserver.HStore(327): Store=8bbf90773c039aafe370b8325f074c96/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:02:29,281 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1038): replaying wal for 880916148c83f94d943041b7127cffb7 2024-12-05T01:02:29,281 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1038): replaying wal for 8bbf90773c039aafe370b8325f074c96 2024-12-05T01:02:29,281 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/880916148c83f94d943041b7127cffb7 2024-12-05T01:02:29,281 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/8bbf90773c039aafe370b8325f074c96 2024-12-05T01:02:29,281 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/880916148c83f94d943041b7127cffb7 2024-12-05T01:02:29,281 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/8bbf90773c039aafe370b8325f074c96 2024-12-05T01:02:29,282 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1048): stopping wal replay for 880916148c83f94d943041b7127cffb7 2024-12-05T01:02:29,282 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1060): Cleaning up temporary data for 880916148c83f94d943041b7127cffb7 2024-12-05T01:02:29,282 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1048): stopping wal replay for 8bbf90773c039aafe370b8325f074c96 2024-12-05T01:02:29,282 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1060): Cleaning up temporary data for 8bbf90773c039aafe370b8325f074c96 2024-12-05T01:02:29,283 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 
{event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1093): writing seq id for 880916148c83f94d943041b7127cffb7 2024-12-05T01:02:29,283 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1093): writing seq id for 8bbf90773c039aafe370b8325f074c96 2024-12-05T01:02:29,285 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/8bbf90773c039aafe370b8325f074c96/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:02:29,285 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/880916148c83f94d943041b7127cffb7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:02:29,285 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1114): Opened 8bbf90773c039aafe370b8325f074c96; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59757605, jitterRate=-0.10954229533672333}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:02:29,285 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1114): Opened 880916148c83f94d943041b7127cffb7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70446553, jitterRate=0.04973544180393219}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:02:29,285 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8bbf90773c039aafe370b8325f074c96 2024-12-05T01:02:29,285 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 880916148c83f94d943041b7127cffb7 2024-12-05T01:02:29,286 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1006): Region open journal for 880916148c83f94d943041b7127cffb7: Running coprocessor pre-open hook at 1733360549278Writing region info on filesystem at 1733360549278Initializing all the Stores at 1733360549278Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360549278Cleaning up temporary data from old regions at 1733360549282 (+4 ms)Running coprocessor post-open hooks at 1733360549285 (+3 ms)Region opened successfully at 1733360549286 (+1 ms) 2024-12-05T01:02:29,286 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1006): Region open journal for 8bbf90773c039aafe370b8325f074c96: Running coprocessor pre-open hook at 1733360549278Writing region info on filesystem at 1733360549278Initializing all the Stores at 
1733360549279 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360549279Cleaning up temporary data from old regions at 1733360549282 (+3 ms)Running coprocessor post-open hooks at 1733360549285 (+3 ms)Region opened successfully at 1733360549286 (+1 ms) 2024-12-05T01:02:29,286 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7., pid=183, masterSystemTime=1733360549274 2024-12-05T01:02:29,286 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96., pid=184, masterSystemTime=1733360549275 2024-12-05T01:02:29,288 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. 2024-12-05T01:02:29,288 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. 2024-12-05T01:02:29,288 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=8bbf90773c039aafe370b8325f074c96, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:02:29,288 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. 2024-12-05T01:02:29,288 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. 
2024-12-05T01:02:29,289 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=880916148c83f94d943041b7127cffb7, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:02:29,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8bbf90773c039aafe370b8325f074c96, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:02:29,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=183, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure 880916148c83f94d943041b7127cffb7, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T01:02:29,292 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=182 2024-12-05T01:02:29,292 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=182, state=SUCCESS, hasLock=false; OpenRegionProcedure 8bbf90773c039aafe370b8325f074c96, server=2113c16e5528,32927,1733360288712 in 167 msec 2024-12-05T01:02:29,293 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=183, resume processing ppid=181 2024-12-05T01:02:29,293 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=8bbf90773c039aafe370b8325f074c96, ASSIGN in 323 msec 2024-12-05T01:02:29,293 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, ppid=181, state=SUCCESS, hasLock=false; OpenRegionProcedure 880916148c83f94d943041b7127cffb7, server=2113c16e5528,41191,1733360288910 in 168 msec 2024-12-05T01:02:29,294 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=181, resume processing ppid=180 2024-12-05T01:02:29,294 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=880916148c83f94d943041b7127cffb7, ASSIGN in 324 msec 2024-12-05T01:02:29,295 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T01:02:29,295 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360549295"}]},"ts":"1733360549295"} 2024-12-05T01:02:29,296 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-05T01:02:29,297 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T01:02:29,297 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-05T01:02:29,300 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-05T01:02:29,301 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:29,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:29,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:29,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:29,311 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:29,311 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:29,311 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:29,311 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:29,311 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:29,311 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:29,311 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:29,311 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:29,311 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 367 msec 2024-12-05T01:02:29,571 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-05T01:02:29,572 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-12-05T01:02:29,572 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-05T01:02:29,572 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:02:29,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-05T01:02:29,576 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:02:29,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportExpiredSnapshot assigned. 2024-12-05T01:02:29,576 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T01:02:29,581 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='08d937c901432505348791d7bd2351bf4', locateType=CURRENT is [region=testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:02:29,582 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='1a1ec8b79e3d03eb4b903d8d71fefd436', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:02:29,583 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='32f0bcf5c8fddf62fd9251e466b5d2804', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:02:29,583 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='2ed853d99cfa77804045b5c175ca15fd8', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:02:29,584 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='44a6662f073cee63a5fb1f3f074d1fd1a', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:02:29,585 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41191 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. with WAL disabled. Data may be lost in the event of a crash. 
2024-12-05T01:02:29,588 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:02:29,590 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T01:02:29,592 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-05T01:02:29,592 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. 2024-12-05T01:02:29,592 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:02:29,593 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T01:02:29,597 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T01:02:29,601 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-05T01:02:29,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-05T01:02:29,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:02:29,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@719480f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:29,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:29,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:29,602 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:29,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:29,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:29,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4506d792, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:29,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:29,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:29,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:29,604 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40094, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:29,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43554f5d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:29,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:29,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:29,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:29,606 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49408, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:02:29,607 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 
2024-12-05T01:02:29,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:29,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:29,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:29,607 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:02:29,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76fff639, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:29,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:29,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:29,608 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:29,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:29,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:29,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@427cd185, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:29,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:29,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:29,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:29,609 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40108, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:29,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75ac24fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:29,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:29,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:29,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:29,611 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49422, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:02:29,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:02:29,614 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 
2024-12-05T01:02:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:29,614 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:02:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-05T01:02:29,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-05T01:02:29,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-05T01:02:29,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-05T01:02:29,617 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:02:29,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-05T01:02:29,618 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:02:29,619 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:02:29,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742225_1401 (size=152) 2024-12-05T01:02:29,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742225_1401 (size=152) 2024-12-05T01:02:29,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742225_1401 (size=152) 2024-12-05T01:02:29,626 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:02:29,626 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 880916148c83f94d943041b7127cffb7}, {pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8bbf90773c039aafe370b8325f074c96}] 2024-12-05T01:02:29,626 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 880916148c83f94d943041b7127cffb7 2024-12-05T01:02:29,626 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8bbf90773c039aafe370b8325f074c96 2024-12-05T01:02:29,722 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-05T01:02:29,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32927 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-05T01:02:29,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41191 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-05T01:02:29,778 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. 2024-12-05T01:02:29,778 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. 2024-12-05T01:02:29,778 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2902): Flushing 880916148c83f94d943041b7127cffb7 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-05T01:02:29,778 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2902): Flushing 8bbf90773c039aafe370b8325f074c96 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-05T01:02:29,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/880916148c83f94d943041b7127cffb7/.tmp/cf/cde7eb2d4ab94264ae4beae79e1eb74d is 71, key is 06dc81c08bc804a564148ed723301327/cf:q/1733360549585/Put/seqid=0 2024-12-05T01:02:29,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/8bbf90773c039aafe370b8325f074c96/.tmp/cf/24dc0a1667694a4c85cb25d8fefe8e40 is 71, key is 17dfc076970acb77f9f47ae6ebd2c1c9/cf:q/1733360549588/Put/seqid=0 2024-12-05T01:02:29,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742227_1403 (size=8188) 2024-12-05T01:02:29,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742227_1403 (size=8188) 2024-12-05T01:02:29,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742226_1402 (size=5422) 2024-12-05T01:02:29,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742226_1402 (size=5422) 2024-12-05T01:02:29,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742226_1402 (size=5422) 2024-12-05T01:02:29,806 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/8bbf90773c039aafe370b8325f074c96/.tmp/cf/24dc0a1667694a4c85cb25d8fefe8e40 2024-12-05T01:02:29,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742227_1403 (size=8188) 2024-12-05T01:02:29,807 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/880916148c83f94d943041b7127cffb7/.tmp/cf/cde7eb2d4ab94264ae4beae79e1eb74d 2024-12-05T01:02:29,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/8bbf90773c039aafe370b8325f074c96/.tmp/cf/24dc0a1667694a4c85cb25d8fefe8e40 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/8bbf90773c039aafe370b8325f074c96/cf/24dc0a1667694a4c85cb25d8fefe8e40 2024-12-05T01:02:29,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/880916148c83f94d943041b7127cffb7/.tmp/cf/cde7eb2d4ab94264ae4beae79e1eb74d as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/880916148c83f94d943041b7127cffb7/cf/cde7eb2d4ab94264ae4beae79e1eb74d 2024-12-05T01:02:29,816 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/880916148c83f94d943041b7127cffb7/cf/cde7eb2d4ab94264ae4beae79e1eb74d, entries=5, sequenceid=5, filesize=5.3 K 2024-12-05T01:02:29,816 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/8bbf90773c039aafe370b8325f074c96/cf/24dc0a1667694a4c85cb25d8fefe8e40, entries=45, sequenceid=5, filesize=8.0 K 2024-12-05T01:02:29,817 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 8bbf90773c039aafe370b8325f074c96 in 38ms, sequenceid=5, compaction requested=false 2024-12-05T01:02:29,817 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 880916148c83f94d943041b7127cffb7 in 39ms, sequenceid=5, compaction requested=false 
2024-12-05T01:02:29,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-05T01:02:29,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-05T01:02:29,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2603): Flush status journal for 8bbf90773c039aafe370b8325f074c96: 2024-12-05T01:02:29,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2603): Flush status journal for 880916148c83f94d943041b7127cffb7: 2024-12-05T01:02:29,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. for snapshot-testExportExpiredSnapshot completed. 2024-12-05T01:02:29,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. for snapshot-testExportExpiredSnapshot completed. 2024-12-05T01:02:29,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-05T01:02:29,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-05T01:02:29,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:29,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:29,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/8bbf90773c039aafe370b8325f074c96/cf/24dc0a1667694a4c85cb25d8fefe8e40] hfiles 2024-12-05T01:02:29,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/880916148c83f94d943041b7127cffb7/cf/cde7eb2d4ab94264ae4beae79e1eb74d] hfiles 2024-12-05T01:02:29,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/880916148c83f94d943041b7127cffb7/cf/cde7eb2d4ab94264ae4beae79e1eb74d for snapshot=snapshot-testExportExpiredSnapshot 2024-12-05T01:02:29,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/8bbf90773c039aafe370b8325f074c96/cf/24dc0a1667694a4c85cb25d8fefe8e40 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-05T01:02:29,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742228_1404 (size=103) 2024-12-05T01:02:29,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. 
2024-12-05T01:02:29,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-05T01:02:29,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=187 2024-12-05T01:02:29,826 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 8bbf90773c039aafe370b8325f074c96 2024-12-05T01:02:29,826 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8bbf90773c039aafe370b8325f074c96 2024-12-05T01:02:29,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742229_1405 (size=103) 2024-12-05T01:02:29,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742228_1404 (size=103) 2024-12-05T01:02:29,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742228_1404 (size=103) 2024-12-05T01:02:29,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742229_1405 (size=103) 2024-12-05T01:02:29,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742229_1405 (size=103) 2024-12-05T01:02:29,827 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8bbf90773c039aafe370b8325f074c96 in 200 msec 2024-12-05T01:02:29,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-05T01:02:30,226 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. 
2024-12-05T01:02:30,226 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-05T01:02:30,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=186 2024-12-05T01:02:30,226 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 880916148c83f94d943041b7127cffb7 2024-12-05T01:02:30,227 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 880916148c83f94d943041b7127cffb7 2024-12-05T01:02:30,229 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=186, resume processing ppid=185 2024-12-05T01:02:30,229 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 880916148c83f94d943041b7127cffb7 in 601 msec 2024-12-05T01:02:30,229 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:02:30,230 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:02:30,231 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:02:30,231 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-05T01:02:30,232 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-05T01:02:30,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742230_1406 (size=609) 2024-12-05T01:02:30,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742230_1406 (size=609) 2024-12-05T01:02:30,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-05T01:02:30,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742230_1406 (size=609) 2024-12-05T01:02:30,246 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ 
ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:02:30,253 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:02:30,254 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-05T01:02:30,255 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:02:30,255 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-05T01:02:30,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 640 msec 2024-12-05T01:02:30,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-05T01:02:30,752 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-12-05T01:02:31,070 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0007_000001 (auth:SIMPLE) from 127.0.0.1:39850 2024-12-05T01:02:31,090 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_1/usercache/jenkins/appcache/application_1733360294895_0007/container_1733360294895_0007_01_000001/launch_container.sh] 2024-12-05T01:02:31,090 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_1/usercache/jenkins/appcache/application_1733360294895_0007/container_1733360294895_0007_01_000001/container_tokens] 2024-12-05T01:02:31,090 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_1/usercache/jenkins/appcache/application_1733360294895_0007/container_1733360294895_0007_01_000001/sysfs] 2024-12-05T01:02:32,072 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:02:36,697 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T01:02:38,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-05T01:02:38,304 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-05T01:02:40,763 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360560763 2024-12-05T01:02:40,763 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37149, tgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360560763, rawTgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360560763, srcFsUri=hdfs://localhost:37149, srcDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:40,791 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37149, inputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:40,791 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360560763, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360560763/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-05T01:02:40,793 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T01:02:40,794 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] 
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
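The SnapshotTTLExpiredException above is the expected outcome of testExportExpiredSnapshot: the snapshot completed around 01:02:30 with ttl=10 (seconds), and ExportSnapshot verified it at roughly 01:02:40, more than 10 seconds later. A minimal sketch of the kind of expiry check this verification performs, assuming a creation timestamp in epoch milliseconds and a TTL in seconds (the class and method names below are illustrative, not HBase internals):

    import java.util.concurrent.TimeUnit;

    final class SnapshotTtlCheck {
        // True when a snapshot with the given TTL (seconds) and creation time
        // (epoch millis) should be treated as expired; ttl <= 0 means "never expires".
        static boolean isExpired(long ttlSeconds, long creationTimeMs, long nowMs) {
            return ttlSeconds > 0 && creationTimeMs + TimeUnit.SECONDS.toMillis(ttlSeconds) < nowMs;
        }

        public static void main(String[] args) {
            long createdMs = 1733360550253L;   // ~2024-12-05T01:02:30, when the snapshot completed
            long exportMs  = 1733360560793L;   // ~2024-12-05T01:02:40, when the export verified it
            System.out.println(isExpired(10, createdMs, exportMs));  // true -> export is refused
        }
    }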
2024-12-05T01:02:40,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-05T01:02:40,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-05T01:02:40,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-05T01:02:40,798 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360560798"}]},"ts":"1733360560798"} 2024-12-05T01:02:40,800 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-05T01:02:40,800 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-05T01:02:40,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-05T01:02:40,801 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=959b26dc683fadc597923b2691f756bb, UNASSIGN}, {pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ff9f2be2a5b6cef2eafb0420a3da7357, UNASSIGN}] 2024-12-05T01:02:40,802 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ff9f2be2a5b6cef2eafb0420a3da7357, UNASSIGN 2024-12-05T01:02:40,802 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=959b26dc683fadc597923b2691f756bb, UNASSIGN 2024-12-05T01:02:40,803 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=ff9f2be2a5b6cef2eafb0420a3da7357, regionState=CLOSING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:02:40,803 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=959b26dc683fadc597923b2691f756bb, regionState=CLOSING, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:02:40,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=959b26dc683fadc597923b2691f756bb, UNASSIGN because future has completed 2024-12-05T01:02:40,804 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:02:40,804 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure 959b26dc683fadc597923b2691f756bb, server=2113c16e5528,44435,1733360288855}] 2024-12-05T01:02:40,805 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ff9f2be2a5b6cef2eafb0420a3da7357, UNASSIGN because future has completed 2024-12-05T01:02:40,805 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:02:40,805 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure ff9f2be2a5b6cef2eafb0420a3da7357, server=2113c16e5528,41191,1733360288910}] 2024-12-05T01:02:40,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-05T01:02:40,957 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(122): Close 959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:40,957 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:02:40,957 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1722): Closing 959b26dc683fadc597923b2691f756bb, disabling compactions & flushes 2024-12-05T01:02:40,957 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. 2024-12-05T01:02:40,957 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. 2024-12-05T01:02:40,957 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. after waiting 0 ms 2024-12-05T01:02:40,957 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. 
2024-12-05T01:02:40,957 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(122): Close ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:40,958 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:02:40,958 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1722): Closing ff9f2be2a5b6cef2eafb0420a3da7357, disabling compactions & flushes 2024-12-05T01:02:40,958 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. 2024-12-05T01:02:40,958 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. 2024-12-05T01:02:40,958 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. after waiting 0 ms 2024-12-05T01:02:40,958 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. 2024-12-05T01:02:40,961 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:02:40,961 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:02:40,961 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:02:40,961 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb. 
2024-12-05T01:02:40,962 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:02:40,962 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1676): Region close journal for 959b26dc683fadc597923b2691f756bb: Waiting for close lock at 1733360560957Running coprocessor pre-close hooks at 1733360560957Disabling compacts and flushes for region at 1733360560957Disabling writes for close at 1733360560957Writing region close event to WAL at 1733360560958 (+1 ms)Running coprocessor post-close hooks at 1733360560961 (+3 ms)Closed at 1733360560961 2024-12-05T01:02:40,962 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357. 2024-12-05T01:02:40,962 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1676): Region close journal for ff9f2be2a5b6cef2eafb0420a3da7357: Waiting for close lock at 1733360560958Running coprocessor pre-close hooks at 1733360560958Disabling compacts and flushes for region at 1733360560958Disabling writes for close at 1733360560958Writing region close event to WAL at 1733360560959 (+1 ms)Running coprocessor post-close hooks at 1733360560961 (+2 ms)Closed at 1733360560962 (+1 ms) 2024-12-05T01:02:40,963 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(157): Closed 959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:40,964 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=959b26dc683fadc597923b2691f756bb, regionState=CLOSED 2024-12-05T01:02:40,964 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(157): Closed ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:40,965 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=ff9f2be2a5b6cef2eafb0420a3da7357, regionState=CLOSED 2024-12-05T01:02:40,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure 959b26dc683fadc597923b2691f756bb, server=2113c16e5528,44435,1733360288855 because future has completed 2024-12-05T01:02:40,966 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure ff9f2be2a5b6cef2eafb0420a3da7357, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T01:02:40,967 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=190 2024-12-05T01:02:40,968 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=190, state=SUCCESS, hasLock=false; CloseRegionProcedure 959b26dc683fadc597923b2691f756bb, server=2113c16e5528,44435,1733360288855 in 162 msec 2024-12-05T01:02:40,969 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=959b26dc683fadc597923b2691f756bb, UNASSIGN in 167 
msec 2024-12-05T01:02:40,969 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=191 2024-12-05T01:02:40,969 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=191, state=SUCCESS, hasLock=false; CloseRegionProcedure ff9f2be2a5b6cef2eafb0420a3da7357, server=2113c16e5528,41191,1733360288910 in 162 msec 2024-12-05T01:02:40,970 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=191, resume processing ppid=189 2024-12-05T01:02:40,970 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ff9f2be2a5b6cef2eafb0420a3da7357, UNASSIGN in 168 msec 2024-12-05T01:02:40,972 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=188 2024-12-05T01:02:40,972 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 171 msec 2024-12-05T01:02:40,973 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360560973"}]},"ts":"1733360560973"} 2024-12-05T01:02:40,975 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-05T01:02:40,975 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-05T01:02:40,976 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 180 msec 2024-12-05T01:02:41,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-05T01:02:41,112 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-05T01:02:41,112 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,114 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,115 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=194, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,116 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(529): No permissions found in hbase:acl 
for acl entry testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,118 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:41,118 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:41,120 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb/recovered.edits] 2024-12-05T01:02:41,120 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357/recovered.edits] 2024-12-05T01:02:41,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,122 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-05T01:02:41,122 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-05T01:02:41,122 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-05T01:02:41,122 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-05T01:02:41,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, 
state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:41,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:41,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:41,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:41,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-05T01:02:41,125 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:41,125 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:41,125 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:41,126 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357/cf/575af66a2d204841adedc7acc8a3d641 to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357/cf/575af66a2d204841adedc7acc8a3d641 2024-12-05T01:02:41,126 DEBUG [HFileArchiver-21 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb/cf/dc7ff33f86e84ccdbc5850026983269a to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb/cf/dc7ff33f86e84ccdbc5850026983269a 2024-12-05T01:02:41,126 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:41,129 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357/recovered.edits/9.seqid 2024-12-05T01:02:41,129 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb/recovered.edits/9.seqid 2024-12-05T01:02:41,129 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/ff9f2be2a5b6cef2eafb0420a3da7357 2024-12-05T01:02:41,130 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportExpiredSnapshot/959b26dc683fadc597923b2691f756bb 2024-12-05T01:02:41,130 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-05T01:02:41,132 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=194, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,134 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-05T01:02:41,136 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-05T01:02:41,137 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=194, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,137 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 
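The disable/delete sequence recorded here (DisableTableProcedure pid=188, DeleteTableProcedure pid=194) and the snapshot deletions that follow correspond to routine test-teardown calls on the HBase Admin API. A minimal client-side sketch of that sequence, assuming an open Connection to the cluster; the table and snapshot names are taken from the log, and the test itself issues the equivalent calls through its testing utilities:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotTestTeardown {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
                admin.disableTable(table);   // DisableTableProcedure (pid=188 above)
                admin.deleteTable(table);    // DeleteTableProcedure (pid=194 above)
                // Snapshot cleanup, as requested in the log entries that follow.
                admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
                admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
                admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");
            }
        }
    }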
2024-12-05T01:02:41,137 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360561137"}]},"ts":"9223372036854775807"} 2024-12-05T01:02:41,137 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360561137"}]},"ts":"9223372036854775807"} 2024-12-05T01:02:41,139 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T01:02:41,139 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 959b26dc683fadc597923b2691f756bb, NAME => 'testtb-testExportExpiredSnapshot,,1733360547289.959b26dc683fadc597923b2691f756bb.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ff9f2be2a5b6cef2eafb0420a3da7357, NAME => 'testtb-testExportExpiredSnapshot,1,1733360547289.ff9f2be2a5b6cef2eafb0420a3da7357.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T01:02:41,139 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 2024-12-05T01:02:41,139 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733360561139"}]},"ts":"9223372036854775807"} 2024-12-05T01:02:41,141 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-05T01:02:41,142 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=194, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,143 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 30 msec 2024-12-05T01:02:41,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-05T01:02:41,232 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-12-05T01:02:41,232 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-05T01:02:41,239 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-05T01:02:41,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-05T01:02:41,242 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-12-05T01:02:41,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-05T01:02:41,245 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-05T01:02:41,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-05T01:02:41,265 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=803 (was 812), OpenFileDescriptor=785 (was 813), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=592 (was 630), ProcessCount=11 (was 17), AvailableMemoryMB=5819 (was 5205) - AvailableMemoryMB LEAK? - 2024-12-05T01:02:41,265 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=803 is superior to 500 2024-12-05T01:02:41,281 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=803, OpenFileDescriptor=785, MaxFileDescriptor=1048576, SystemLoadAverage=592, ProcessCount=11, AvailableMemoryMB=5819 2024-12-05T01:02:41,281 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=803 is superior to 500 2024-12-05T01:02:41,282 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T01:02:41,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-05T01:02:41,284 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T01:02:41,284 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:41,284 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 195 2024-12-05T01:02:41,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-05T01:02:41,285 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T01:02:41,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742231_1407 (size=412) 2024-12-05T01:02:41,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742231_1407 (size=412) 2024-12-05T01:02:41,291 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742231_1407 (size=412) 2024-12-05T01:02:41,292 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3a544c5760145e2a9f5308318c2ba2f9, NAME => 'testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:41,293 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 0f2d269b46975c77edbdde8d1abc928f, NAME => 'testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:41,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742232_1408 (size=73) 2024-12-05T01:02:41,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742232_1408 (size=73) 2024-12-05T01:02:41,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742233_1409 (size=73) 2024-12-05T01:02:41,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742233_1409 (size=73) 2024-12-05T01:02:41,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742232_1408 (size=73) 2024-12-05T01:02:41,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742233_1409 (size=73) 2024-12-05T01:02:41,299 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:41,299 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 3a544c5760145e2a9f5308318c2ba2f9, disabling compactions & flushes 2024-12-05T01:02:41,299 INFO 
[RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. 2024-12-05T01:02:41,299 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. 2024-12-05T01:02:41,299 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. after waiting 0 ms 2024-12-05T01:02:41,299 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. 2024-12-05T01:02:41,299 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. 2024-12-05T01:02:41,299 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3a544c5760145e2a9f5308318c2ba2f9: Waiting for close lock at 1733360561299Disabling compacts and flushes for region at 1733360561299Disabling writes for close at 1733360561299Writing region close event to WAL at 1733360561299Closed at 1733360561299 2024-12-05T01:02:41,300 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:41,300 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 0f2d269b46975c77edbdde8d1abc928f, disabling compactions & flushes 2024-12-05T01:02:41,300 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. 2024-12-05T01:02:41,300 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. 2024-12-05T01:02:41,300 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. after waiting 0 ms 2024-12-05T01:02:41,300 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. 2024-12-05T01:02:41,300 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. 
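The CreateTableProcedure above (pid=195) was triggered by a client create of 'testtb-testEmptyExportFileSystemState' with a single 'cf' family, one version, and otherwise default settings, split into the two regions printed in the descriptor. A minimal sketch of declaring such a table with the descriptor builders, assuming an Admin handle and simplifying the defaults relative to the full descriptor logged above:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class CreateEmptyExportTable {
        static void create(Admin admin) throws IOException {
            TableDescriptor desc = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
                .setRegionReplication(1)                        // REGION_REPLICATION => '1'
                .setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("cf"))
                    .setMaxVersions(1)                          // VERSIONS => '1'
                    .setBlocksize(65536)                        // BLOCKSIZE => '65536 B (64KB)'
                    .build())
                .build();
            // One split key '1' yields the two regions ('', '1') and ('1', '') seen in the log.
            admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
    }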
2024-12-05T01:02:41,300 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 0f2d269b46975c77edbdde8d1abc928f: Waiting for close lock at 1733360561300Disabling compacts and flushes for region at 1733360561300Disabling writes for close at 1733360561300Writing region close event to WAL at 1733360561300Closed at 1733360561300 2024-12-05T01:02:41,301 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T01:02:41,301 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733360561301"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360561301"}]},"ts":"1733360561301"} 2024-12-05T01:02:41,301 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733360561301"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360561301"}]},"ts":"1733360561301"} 2024-12-05T01:02:41,303 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T01:02:41,304 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T01:02:41,304 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360561304"}]},"ts":"1733360561304"} 2024-12-05T01:02:41,305 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-05T01:02:41,305 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {2113c16e5528=0} racks are {/default-rack=0} 2024-12-05T01:02:41,306 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T01:02:41,306 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T01:02:41,306 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T01:02:41,306 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T01:02:41,306 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T01:02:41,306 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T01:02:41,306 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T01:02:41,306 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T01:02:41,306 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T01:02:41,306 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T01:02:41,307 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a544c5760145e2a9f5308318c2ba2f9, ASSIGN}, {pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=0f2d269b46975c77edbdde8d1abc928f, ASSIGN}] 2024-12-05T01:02:41,308 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=0f2d269b46975c77edbdde8d1abc928f, ASSIGN 2024-12-05T01:02:41,308 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a544c5760145e2a9f5308318c2ba2f9, ASSIGN 2024-12-05T01:02:41,309 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=0f2d269b46975c77edbdde8d1abc928f, ASSIGN; state=OFFLINE, location=2113c16e5528,41191,1733360288910; forceNewPlan=false, retain=false 2024-12-05T01:02:41,309 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a544c5760145e2a9f5308318c2ba2f9, ASSIGN; state=OFFLINE, location=2113c16e5528,44435,1733360288855; forceNewPlan=false, retain=false 2024-12-05T01:02:41,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-05T01:02:41,459 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
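The BalancerClusterState lines above show the balancer sizing up the cluster (three region servers on one host and one rack) before handing the two new regions to different servers. A toy sketch of that distribute-across-servers idea, deliberately simplified and not the actual BaseLoadBalancer logic:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class RoundRobinAssignSketch {
        // Hand out region names to servers in round-robin order.
        static Map<String, List<String>> assign(List<String> regions, List<String> servers) {
            Map<String, List<String>> plan = new HashMap<>();
            for (String server : servers) {
                plan.put(server, new ArrayList<>());
            }
            for (int i = 0; i < regions.size(); i++) {
                plan.get(servers.get(i % servers.size())).add(regions.get(i));
            }
            return plan;
        }

        public static void main(String[] args) {
            // Region encodings and server names are taken from the log above; the mini-cluster's
            // third region server (port 32927) is omitted here since it received neither region.
            List<String> regions = List.of("3a544c5760145e2a9f5308318c2ba2f9",
                                           "0f2d269b46975c77edbdde8d1abc928f");
            List<String> servers = List.of("2113c16e5528,44435,1733360288855",
                                           "2113c16e5528,41191,1733360288910");
            System.out.println(assign(regions, servers));
        }
    }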
2024-12-05T01:02:41,459 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=3a544c5760145e2a9f5308318c2ba2f9, regionState=OPENING, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:02:41,459 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=0f2d269b46975c77edbdde8d1abc928f, regionState=OPENING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:02:41,461 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a544c5760145e2a9f5308318c2ba2f9, ASSIGN because future has completed 2024-12-05T01:02:41,461 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=198, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3a544c5760145e2a9f5308318c2ba2f9, server=2113c16e5528,44435,1733360288855}] 2024-12-05T01:02:41,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=0f2d269b46975c77edbdde8d1abc928f, ASSIGN because future has completed 2024-12-05T01:02:41,462 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0f2d269b46975c77edbdde8d1abc928f, server=2113c16e5528,41191,1733360288910}] 2024-12-05T01:02:41,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-05T01:02:41,616 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. 2024-12-05T01:02:41,616 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. 2024-12-05T01:02:41,616 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7752): Opening region: {ENCODED => 3a544c5760145e2a9f5308318c2ba2f9, NAME => 'testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T01:02:41,616 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7752): Opening region: {ENCODED => 0f2d269b46975c77edbdde8d1abc928f, NAME => 'testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T01:02:41,616 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. 
service=AccessControlService 2024-12-05T01:02:41,616 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. service=AccessControlService 2024-12-05T01:02:41,616 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T01:02:41,616 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T01:02:41,617 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:41,617 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:41,617 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:41,617 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:41,617 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7794): checking encryption for 3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:41,617 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7794): checking encryption for 0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:41,617 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7797): checking classloading for 3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:41,617 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7797): checking classloading for 0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:41,618 INFO [StoreOpener-3a544c5760145e2a9f5308318c2ba2f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:41,618 INFO [StoreOpener-0f2d269b46975c77edbdde8d1abc928f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:41,619 INFO [StoreOpener-3a544c5760145e2a9f5308318c2ba2f9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3a544c5760145e2a9f5308318c2ba2f9 columnFamilyName cf 2024-12-05T01:02:41,619 INFO [StoreOpener-0f2d269b46975c77edbdde8d1abc928f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0f2d269b46975c77edbdde8d1abc928f columnFamilyName cf 2024-12-05T01:02:41,619 DEBUG [StoreOpener-3a544c5760145e2a9f5308318c2ba2f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:41,619 DEBUG [StoreOpener-0f2d269b46975c77edbdde8d1abc928f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:41,620 INFO [StoreOpener-0f2d269b46975c77edbdde8d1abc928f-1 {}] regionserver.HStore(327): Store=0f2d269b46975c77edbdde8d1abc928f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:02:41,620 INFO [StoreOpener-3a544c5760145e2a9f5308318c2ba2f9-1 {}] regionserver.HStore(327): Store=3a544c5760145e2a9f5308318c2ba2f9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:02:41,620 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1038): replaying wal for 3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:41,620 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1038): replaying wal for 0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:41,621 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:41,621 DEBUG 
[RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:41,621 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:41,621 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:41,621 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1048): stopping wal replay for 0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:41,621 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1048): stopping wal replay for 3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:41,621 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1060): Cleaning up temporary data for 0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:41,621 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1060): Cleaning up temporary data for 3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:41,622 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1093): writing seq id for 3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:41,622 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1093): writing seq id for 0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:41,624 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:02:41,624 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:02:41,625 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1114): Opened 3a544c5760145e2a9f5308318c2ba2f9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67354131, jitterRate=0.003654763102531433}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:02:41,625 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1114): Opened 
0f2d269b46975c77edbdde8d1abc928f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74121142, jitterRate=0.10449108481407166}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:02:41,625 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:41,625 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:41,625 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1006): Region open journal for 0f2d269b46975c77edbdde8d1abc928f: Running coprocessor pre-open hook at 1733360561617Writing region info on filesystem at 1733360561617Initializing all the Stores at 1733360561618 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360561618Cleaning up temporary data from old regions at 1733360561621 (+3 ms)Running coprocessor post-open hooks at 1733360561625 (+4 ms)Region opened successfully at 1733360561625 2024-12-05T01:02:41,625 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1006): Region open journal for 3a544c5760145e2a9f5308318c2ba2f9: Running coprocessor pre-open hook at 1733360561617Writing region info on filesystem at 1733360561617Initializing all the Stores at 1733360561618 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360561618Cleaning up temporary data from old regions at 1733360561621 (+3 ms)Running coprocessor post-open hooks at 1733360561625 (+4 ms)Region opened successfully at 1733360561625 2024-12-05T01:02:41,626 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f., pid=199, masterSystemTime=1733360561613 2024-12-05T01:02:41,626 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9., pid=198, masterSystemTime=1733360561613 2024-12-05T01:02:41,628 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. 
2024-12-05T01:02:41,628 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. 2024-12-05T01:02:41,628 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=3a544c5760145e2a9f5308318c2ba2f9, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:02:41,629 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. 2024-12-05T01:02:41,629 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. 2024-12-05T01:02:41,630 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=0f2d269b46975c77edbdde8d1abc928f, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:02:41,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=198, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3a544c5760145e2a9f5308318c2ba2f9, server=2113c16e5528,44435,1733360288855 because future has completed 2024-12-05T01:02:41,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0f2d269b46975c77edbdde8d1abc928f, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T01:02:41,633 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=198, resume processing ppid=196 2024-12-05T01:02:41,633 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, ppid=196, state=SUCCESS, hasLock=false; OpenRegionProcedure 3a544c5760145e2a9f5308318c2ba2f9, server=2113c16e5528,44435,1733360288855 in 170 msec 2024-12-05T01:02:41,634 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=197 2024-12-05T01:02:41,634 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=197, state=SUCCESS, hasLock=false; OpenRegionProcedure 0f2d269b46975c77edbdde8d1abc928f, server=2113c16e5528,41191,1733360288910 in 170 msec 2024-12-05T01:02:41,634 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a544c5760145e2a9f5308318c2ba2f9, ASSIGN in 326 msec 2024-12-05T01:02:41,635 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=197, resume processing ppid=195 2024-12-05T01:02:41,635 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=0f2d269b46975c77edbdde8d1abc928f, ASSIGN in 327 msec 2024-12-05T01:02:41,636 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute 
state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T01:02:41,636 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360561636"}]},"ts":"1733360561636"} 2024-12-05T01:02:41,637 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-05T01:02:41,638 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T01:02:41,638 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-05T01:02:41,641 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-05T01:02:41,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:41,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:41,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:41,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:41,646 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:41,646 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:41,646 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:41,646 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:41,646 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:41,646 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:41,646 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:41,646 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:41,648 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 364 msec 2024-12-05T01:02:41,902 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-05T01:02:41,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-05T01:02:41,911 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-05T01:02:41,911 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. Timeout = 60000ms 2024-12-05T01:02:41,912 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:02:41,914 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32927 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32778 bytes) of info 2024-12-05T01:02:41,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-12-05T01:02:41,918 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:02:41,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-12-05T01:02:41,918 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T01:02:41,921 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-05T01:02:41,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360561921 (current time:1733360561921). 
2024-12-05T01:02:41,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:02:41,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-05T01:02:41,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:02:41,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52062c0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:41,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:41,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:41,922 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:41,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:41,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:41,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38b5a56, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:41,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:41,923 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:41,923 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:41,923 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33258, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:41,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40e46dd7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:41,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:41,925 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:41,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:41,926 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44250, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:02:41,926 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 2024-12-05T01:02:41,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:41,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:41,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:41,927 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T01:02:41,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@100d181b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:41,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:41,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:41,928 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:41,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:41,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:41,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7646f88f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:41,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:41,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:41,929 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:41,929 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33290, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:41,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a68b9f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:41,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:41,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:41,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:41,932 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44260, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-05T01:02:41,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:02:41,934 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 2024-12-05T01:02:41,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:41,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:41,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:41,934 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T01:02:41,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-05T01:02:41,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T01:02:41,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-05T01:02:41,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-05T01:02:41,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-05T01:02:41,937 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:02:41,938 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:02:41,940 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:02:41,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742234_1410 (size=185) 2024-12-05T01:02:41,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742234_1410 (size=185) 2024-12-05T01:02:41,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742234_1410 (size=185) 2024-12-05T01:02:41,946 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:02:41,946 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a544c5760145e2a9f5308318c2ba2f9}, {pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0f2d269b46975c77edbdde8d1abc928f}] 
2024-12-05T01:02:41,947 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:41,947 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:42,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-05T01:02:42,099 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41191 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-12-05T01:02:42,099 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44435 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=201 2024-12-05T01:02:42,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. 2024-12-05T01:02:42,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. 2024-12-05T01:02:42,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.HRegion(2603): Flush status journal for 3a544c5760145e2a9f5308318c2ba2f9: 2024-12-05T01:02:42,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for 0f2d269b46975c77edbdde8d1abc928f: 2024-12-05T01:02:42,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-05T01:02:42,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-05T01:02:42,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:42,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:42,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:42,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:02:42,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:42,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:02:42,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742235_1411 (size=76) 2024-12-05T01:02:42,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742235_1411 (size=76) 2024-12-05T01:02:42,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742236_1412 (size=76) 2024-12-05T01:02:42,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742235_1411 (size=76) 2024-12-05T01:02:42,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742236_1412 (size=76) 2024-12-05T01:02:42,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. 2024-12-05T01:02:42,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-12-05T01:02:42,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742236_1412 (size=76) 2024-12-05T01:02:42,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. 
2024-12-05T01:02:42,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=201 2024-12-05T01:02:42,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-12-05T01:02:42,106 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:42,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=201 2024-12-05T01:02:42,106 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:42,106 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:42,107 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:42,108 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0f2d269b46975c77edbdde8d1abc928f in 161 msec 2024-12-05T01:02:42,109 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=201, resume processing ppid=200 2024-12-05T01:02:42,109 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3a544c5760145e2a9f5308318c2ba2f9 in 161 msec 2024-12-05T01:02:42,109 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:02:42,110 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:02:42,111 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:02:42,111 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:42,111 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:42,117 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742237_1413 (size=567) 2024-12-05T01:02:42,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742237_1413 (size=567) 2024-12-05T01:02:42,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742237_1413 (size=567) 2024-12-05T01:02:42,120 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:02:42,123 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:02:42,124 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:42,125 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:02:42,125 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-05T01:02:42,126 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 190 msec 2024-12-05T01:02:42,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-05T01:02:42,252 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-05T01:02:42,255 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='0a7b308755df7efa7dce9481ead635d90', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9., hostname=2113c16e5528,44435,1733360288855, seqNum=2] 2024-12-05T01:02:42,256 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', 
row='1bc3a577c00b515b0e3c19b3bc5051bb3', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:02:42,256 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='21782a2623abc4d3f062410e4baaf99bd', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:02:42,257 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='338f54626f5390c69fc35f2227ba93975', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:02:42,258 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='40ad8b5137bde9d56cda48232167d15f4', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:02:42,259 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='55164ea33452c7d11027fba994d4cdf5f', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:02:42,259 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44435 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:02:42,261 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41191 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:02:42,261 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T01:02:42,263 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-05T01:02:42,263 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. 
2024-12-05T01:02:42,263 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:02:42,265 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T01:02:42,268 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T01:02:42,272 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T01:02:42,274 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-05T01:02:42,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360562274 (current time:1733360562274). 2024-12-05T01:02:42,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:02:42,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-05T01:02:42,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:02:42,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67493a0e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:42,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:42,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:42,275 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:42,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:42,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:42,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e337c81, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:42,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:42,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:42,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:42,276 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33310, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:42,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@650d7476, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:42,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:42,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:42,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:42,278 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44276, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:02:42,279 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 
2024-12-05T01:02:42,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:42,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:42,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:42,279 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:02:42,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71afc814, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:42,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:42,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:42,280 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:42,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:42,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:42,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65f9adf2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:42,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:42,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:42,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:42,281 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33324, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:42,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3458b308, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:42,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:42,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:42,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:42,283 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44288, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:02:42,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:02:42,285 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 
2024-12-05T01:02:42,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:42,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:42,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:42,285 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:02:42,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-05T01:02:42,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-05T01:02:42,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-05T01:02:42,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-05T01:02:42,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-05T01:02:42,288 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:02:42,288 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:02:42,290 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:02:42,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742238_1414 (size=180) 2024-12-05T01:02:42,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742238_1414 (size=180) 2024-12-05T01:02:42,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742238_1414 (size=180) 2024-12-05T01:02:42,296 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:02:42,296 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a544c5760145e2a9f5308318c2ba2f9}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0f2d269b46975c77edbdde8d1abc928f}] 2024-12-05T01:02:42,297 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:42,297 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:42,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-05T01:02:42,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41191 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-05T01:02:42,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44435 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-05T01:02:42,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. 2024-12-05T01:02:42,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. 2024-12-05T01:02:42,449 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2902): Flushing 3a544c5760145e2a9f5308318c2ba2f9 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-05T01:02:42,449 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2902): Flushing 0f2d269b46975c77edbdde8d1abc928f 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-05T01:02:42,465 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9/.tmp/cf/4d889a6ad849485495e6a669cb2b2e58 is 71, key is 012599dd0256dc2ebaa485e537862dcd/cf:q/1733360562259/Put/seqid=0 2024-12-05T01:02:42,465 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f/.tmp/cf/95dc09c156f04181b0732bfbdb9d0a25 is 71, key is 119ddc240d232842e47aaebe57fb64d7/cf:q/1733360562260/Put/seqid=0 2024-12-05T01:02:42,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742239_1415 (size=5356) 2024-12-05T01:02:42,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742239_1415 (size=5356) 2024-12-05T01:02:42,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742239_1415 (size=5356) 2024-12-05T01:02:42,472 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9/.tmp/cf/4d889a6ad849485495e6a669cb2b2e58 2024-12-05T01:02:42,476 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9/.tmp/cf/4d889a6ad849485495e6a669cb2b2e58 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9/cf/4d889a6ad849485495e6a669cb2b2e58 2024-12-05T01:02:42,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742240_1416 (size=8256) 2024-12-05T01:02:42,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742240_1416 (size=8256) 2024-12-05T01:02:42,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742240_1416 (size=8256) 2024-12-05T01:02:42,479 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f/.tmp/cf/95dc09c156f04181b0732bfbdb9d0a25 2024-12-05T01:02:42,481 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9/cf/4d889a6ad849485495e6a669cb2b2e58, entries=4, sequenceid=6, filesize=5.2 K 2024-12-05T01:02:42,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f/.tmp/cf/95dc09c156f04181b0732bfbdb9d0a25 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f/cf/95dc09c156f04181b0732bfbdb9d0a25 2024-12-05T01:02:42,483 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 3a544c5760145e2a9f5308318c2ba2f9 in 34ms, sequenceid=6, compaction requested=false 2024-12-05T01:02:42,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for 3a544c5760145e2a9f5308318c2ba2f9: 2024-12-05T01:02:42,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-05T01:02:42,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:42,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:42,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9/cf/4d889a6ad849485495e6a669cb2b2e58] hfiles 2024-12-05T01:02:42,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9/cf/4d889a6ad849485495e6a669cb2b2e58 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:42,487 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f/cf/95dc09c156f04181b0732bfbdb9d0a25, entries=46, sequenceid=6, filesize=8.1 K 2024-12-05T01:02:42,488 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 0f2d269b46975c77edbdde8d1abc928f in 39ms, sequenceid=6, compaction requested=false 2024-12-05T01:02:42,488 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for 0f2d269b46975c77edbdde8d1abc928f: 2024-12-05T01:02:42,488 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-05T01:02:42,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:42,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:42,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742241_1417 (size=115) 2024-12-05T01:02:42,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f/cf/95dc09c156f04181b0732bfbdb9d0a25] hfiles 2024-12-05T01:02:42,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f/cf/95dc09c156f04181b0732bfbdb9d0a25 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:42,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742241_1417 (size=115) 2024-12-05T01:02:42,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742241_1417 (size=115) 2024-12-05T01:02:42,490 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. 
2024-12-05T01:02:42,490 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-05T01:02:42,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=204 2024-12-05T01:02:42,490 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:42,490 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:42,492 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3a544c5760145e2a9f5308318c2ba2f9 in 195 msec 2024-12-05T01:02:42,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742242_1418 (size=115) 2024-12-05T01:02:42,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742242_1418 (size=115) 2024-12-05T01:02:42,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742242_1418 (size=115) 2024-12-05T01:02:42,494 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. 
2024-12-05T01:02:42,494 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-05T01:02:42,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-12-05T01:02:42,495 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:42,495 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:42,497 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=203 2024-12-05T01:02:42,497 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0f2d269b46975c77edbdde8d1abc928f in 199 msec 2024-12-05T01:02:42,497 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:02:42,498 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:02:42,498 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:02:42,498 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:42,499 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:42,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742243_1419 (size=645) 2024-12-05T01:02:42,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742243_1419 (size=645) 2024-12-05T01:02:42,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742243_1419 (size=645) 2024-12-05T01:02:42,507 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:02:42,511 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:02:42,512 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:42,513 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:02:42,513 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-05T01:02:42,514 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 227 msec 2024-12-05T01:02:42,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-05T01:02:42,601 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-05T01:02:42,602 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360562602 2024-12-05T01:02:42,602 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37149, tgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360562602, rawTgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360562602, srcFsUri=hdfs://localhost:37149, srcDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:42,631 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37149, inputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:42,631 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360562602, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360562602/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:42,632 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T01:02:42,635 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360562602/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:42,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742245_1421 (size=567) 2024-12-05T01:02:42,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742244_1420 (size=185) 2024-12-05T01:02:42,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742245_1421 (size=567) 2024-12-05T01:02:42,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742244_1420 (size=185) 2024-12-05T01:02:42,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742245_1421 (size=567) 2024-12-05T01:02:42,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742244_1420 (size=185) 2024-12-05T01:02:42,646 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:42,646 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:42,647 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:43,634 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-15856413244640614049.jar 2024-12-05T01:02:43,634 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:43,635 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:43,700 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-18369397088661713174.jar 2024-12-05T01:02:43,701 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:43,701 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:43,701 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:43,701 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:43,702 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:43,702 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:43,702 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T01:02:43,702 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T01:02:43,702 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T01:02:43,702 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T01:02:43,703 DEBUG [Time-limited test 
{}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T01:02:43,703 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T01:02:43,703 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T01:02:43,703 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T01:02:43,703 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T01:02:43,703 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T01:02:43,704 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T01:02:43,704 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:02:43,704 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:02:43,704 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:02:43,705 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:02:43,705 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:02:43,705 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:02:43,705 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:02:43,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742246_1422 (size=24020) 2024-12-05T01:02:43,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742246_1422 (size=24020) 2024-12-05T01:02:43,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742246_1422 (size=24020) 2024-12-05T01:02:43,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742247_1423 (size=77755) 2024-12-05T01:02:43,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742247_1423 (size=77755) 2024-12-05T01:02:43,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742247_1423 (size=77755) 2024-12-05T01:02:43,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742248_1424 (size=131360) 2024-12-05T01:02:43,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742248_1424 (size=131360) 2024-12-05T01:02:43,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742248_1424 (size=131360) 2024-12-05T01:02:43,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742249_1425 (size=111793) 2024-12-05T01:02:43,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742249_1425 (size=111793) 2024-12-05T01:02:43,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742249_1425 (size=111793) 2024-12-05T01:02:43,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742250_1426 (size=1832290) 2024-12-05T01:02:43,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742250_1426 (size=1832290) 2024-12-05T01:02:43,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742250_1426 (size=1832290) 2024-12-05T01:02:43,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742251_1427 (size=6424744) 2024-12-05T01:02:43,811 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742251_1427 (size=6424744) 2024-12-05T01:02:43,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742251_1427 (size=6424744) 2024-12-05T01:02:43,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742252_1428 (size=8360282) 2024-12-05T01:02:43,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742252_1428 (size=8360282) 2024-12-05T01:02:43,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742252_1428 (size=8360282) 2024-12-05T01:02:43,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742253_1429 (size=503880) 2024-12-05T01:02:43,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742253_1429 (size=503880) 2024-12-05T01:02:43,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742253_1429 (size=503880) 2024-12-05T01:02:43,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742254_1430 (size=322274) 2024-12-05T01:02:43,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742254_1430 (size=322274) 2024-12-05T01:02:43,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742254_1430 (size=322274) 2024-12-05T01:02:43,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742255_1431 (size=20406) 2024-12-05T01:02:43,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742255_1431 (size=20406) 2024-12-05T01:02:43,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742255_1431 (size=20406) 2024-12-05T01:02:43,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742256_1432 (size=45609) 2024-12-05T01:02:43,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742256_1432 (size=45609) 2024-12-05T01:02:43,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742256_1432 (size=45609) 2024-12-05T01:02:43,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742257_1433 (size=136454) 2024-12-05T01:02:43,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742257_1433 (size=136454) 2024-12-05T01:02:43,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742257_1433 (size=136454) 2024-12-05T01:02:43,876 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742258_1434 (size=1597136) 2024-12-05T01:02:43,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742258_1434 (size=1597136) 2024-12-05T01:02:43,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742258_1434 (size=1597136) 2024-12-05T01:02:43,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742259_1435 (size=30873) 2024-12-05T01:02:43,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742259_1435 (size=30873) 2024-12-05T01:02:43,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742259_1435 (size=30873) 2024-12-05T01:02:43,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742260_1436 (size=29229) 2024-12-05T01:02:43,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742260_1436 (size=29229) 2024-12-05T01:02:43,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742260_1436 (size=29229) 2024-12-05T01:02:43,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742261_1437 (size=903858) 2024-12-05T01:02:43,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742261_1437 (size=903858) 2024-12-05T01:02:43,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742261_1437 (size=903858) 2024-12-05T01:02:43,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742262_1438 (size=5175431) 2024-12-05T01:02:43,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742262_1438 (size=5175431) 2024-12-05T01:02:43,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742262_1438 (size=5175431) 2024-12-05T01:02:43,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742263_1439 (size=232881) 2024-12-05T01:02:43,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742263_1439 (size=232881) 2024-12-05T01:02:43,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742263_1439 (size=232881) 2024-12-05T01:02:43,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742264_1440 (size=1323991) 2024-12-05T01:02:43,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742264_1440 (size=1323991) 2024-12-05T01:02:43,933 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742264_1440 (size=1323991) 2024-12-05T01:02:43,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742265_1441 (size=4695811) 2024-12-05T01:02:43,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742265_1441 (size=4695811) 2024-12-05T01:02:43,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742265_1441 (size=4695811) 2024-12-05T01:02:43,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742266_1442 (size=1877034) 2024-12-05T01:02:43,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742266_1442 (size=1877034) 2024-12-05T01:02:43,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742266_1442 (size=1877034) 2024-12-05T01:02:43,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742267_1443 (size=217555) 2024-12-05T01:02:43,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742267_1443 (size=217555) 2024-12-05T01:02:43,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742267_1443 (size=217555) 2024-12-05T01:02:43,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742268_1444 (size=4188619) 2024-12-05T01:02:43,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742268_1444 (size=4188619) 2024-12-05T01:02:43,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742268_1444 (size=4188619) 2024-12-05T01:02:43,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742269_1445 (size=127628) 2024-12-05T01:02:43,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742269_1445 (size=127628) 2024-12-05T01:02:43,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742269_1445 (size=127628) 2024-12-05T01:02:43,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742270_1446 (size=443171) 2024-12-05T01:02:43,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742270_1446 (size=443171) 2024-12-05T01:02:43,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742270_1446 (size=443171) 2024-12-05T01:02:43,998 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-05T01:02:44,000 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-05T01:02:44,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742271_1447 (size=7) 2024-12-05T01:02:44,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742271_1447 (size=7) 2024-12-05T01:02:44,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742271_1447 (size=7) 2024-12-05T01:02:44,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742272_1448 (size=10) 2024-12-05T01:02:44,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742272_1448 (size=10) 2024-12-05T01:02:44,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742272_1448 (size=10) 2024-12-05T01:02:44,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742273_1449 (size=303988) 2024-12-05T01:02:44,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742273_1449 (size=303988) 2024-12-05T01:02:44,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742273_1449 (size=303988) 2024-12-05T01:02:44,040 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T01:02:44,040 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T01:02:44,065 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0008_000001 (auth:SIMPLE) from 127.0.0.1:36432 2024-12-05T01:02:46,495 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:02:48,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-05T01:02:48,304 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-05T01:02:48,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-05T01:02:48,431 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0008_000001 (auth:SIMPLE) from 127.0.0.1:59618 2024-12-05T01:02:48,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742274_1450 (size=349662) 2024-12-05T01:02:48,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742274_1450 (size=349662) 2024-12-05T01:02:48,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742274_1450 (size=349662) 2024-12-05T01:02:49,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742275_1451 (size=8568) 2024-12-05T01:02:49,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742275_1451 (size=8568) 2024-12-05T01:02:49,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742275_1451 (size=8568) 2024-12-05T01:02:49,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742276_1452 (size=460) 2024-12-05T01:02:49,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742276_1452 (size=460) 2024-12-05T01:02:49,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742276_1452 (size=460) 2024-12-05T01:02:49,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742277_1453 (size=8568) 2024-12-05T01:02:49,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742277_1453 (size=8568) 2024-12-05T01:02:49,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742277_1453 (size=8568) 2024-12-05T01:02:49,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37797 is added to blk_1073742278_1454 (size=349662) 2024-12-05T01:02:49,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742278_1454 (size=349662) 2024-12-05T01:02:49,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742278_1454 (size=349662) 2024-12-05T01:02:51,131 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T01:02:51,132 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-05T01:02:51,136 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:51,136 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T01:02:51,137 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T01:02:51,137 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:51,137 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-05T01:02:51,137 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-05T01:02:51,137 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360562602/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360562602/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:51,137 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360562602/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-05T01:02:51,137 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360562602/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-05T01:02:51,142 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=206, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure 
table=testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-05T01:02:51,144 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360571144"}]},"ts":"1733360571144"} 2024-12-05T01:02:51,146 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-05T01:02:51,146 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-05T01:02:51,147 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-05T01:02:51,148 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a544c5760145e2a9f5308318c2ba2f9, UNASSIGN}, {pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=0f2d269b46975c77edbdde8d1abc928f, UNASSIGN}] 2024-12-05T01:02:51,149 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=0f2d269b46975c77edbdde8d1abc928f, UNASSIGN 2024-12-05T01:02:51,149 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a544c5760145e2a9f5308318c2ba2f9, UNASSIGN 2024-12-05T01:02:51,149 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=0f2d269b46975c77edbdde8d1abc928f, regionState=CLOSING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:02:51,149 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=3a544c5760145e2a9f5308318c2ba2f9, regionState=CLOSING, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:02:51,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=0f2d269b46975c77edbdde8d1abc928f, UNASSIGN because future has completed 2024-12-05T01:02:51,150 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:02:51,151 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0f2d269b46975c77edbdde8d1abc928f, server=2113c16e5528,41191,1733360288910}] 2024-12-05T01:02:51,151 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a544c5760145e2a9f5308318c2ba2f9, UNASSIGN because future has completed 2024-12-05T01:02:51,151 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:02:51,151 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3a544c5760145e2a9f5308318c2ba2f9, server=2113c16e5528,44435,1733360288855}] 2024-12-05T01:02:51,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-05T01:02:51,316 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(122): Close 0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:51,316 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(122): Close 3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:51,316 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:02:51,316 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:02:51,316 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1722): Closing 0f2d269b46975c77edbdde8d1abc928f, disabling compactions & flushes 2024-12-05T01:02:51,316 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1722): Closing 3a544c5760145e2a9f5308318c2ba2f9, disabling compactions & flushes 2024-12-05T01:02:51,316 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. 2024-12-05T01:02:51,316 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. 2024-12-05T01:02:51,316 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. 2024-12-05T01:02:51,316 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. 2024-12-05T01:02:51,316 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. 
after waiting 0 ms 2024-12-05T01:02:51,316 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. after waiting 0 ms 2024-12-05T01:02:51,316 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. 2024-12-05T01:02:51,316 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. 2024-12-05T01:02:51,321 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:02:51,321 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:02:51,321 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:02:51,321 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f. 2024-12-05T01:02:51,321 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1676): Region close journal for 0f2d269b46975c77edbdde8d1abc928f: Waiting for close lock at 1733360571316Running coprocessor pre-close hooks at 1733360571316Disabling compacts and flushes for region at 1733360571316Disabling writes for close at 1733360571316Writing region close event to WAL at 1733360571317 (+1 ms)Running coprocessor post-close hooks at 1733360571321 (+4 ms)Closed at 1733360571321 2024-12-05T01:02:51,321 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:02:51,321 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9. 
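[annotation] With the export of 'emptySnaptb0-testEmptyExportFileSystemState' completed and verified above, the entries from pid=206 onward show the master walking a DisableTableProcedure down through CloseTableRegionsProcedure (pid=207), the two UNASSIGN TransitRegionStateProcedures (pid=208/209) and the per-region CloseRegionProcedures (pid=210/211), with each region server writing a recovered.edits seqid file before closing its region. For orientation only, a minimal client-side sketch of the call that triggers this cascade follows; the table name is taken from the log, while the configuration and connection handling are assumed and are not part of the test output.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a DisableTableProcedure on the master (pid=206 in the log above)
          // and blocks until the regions are unassigned and the table is DISABLED.
          admin.disableTable(table);
          System.out.println("disabled: " + admin.isTableDisabled(table));
        }
      }
    }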
2024-12-05T01:02:51,321 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1676): Region close journal for 3a544c5760145e2a9f5308318c2ba2f9: Waiting for close lock at 1733360571316Running coprocessor pre-close hooks at 1733360571316Disabling compacts and flushes for region at 1733360571316Disabling writes for close at 1733360571316Writing region close event to WAL at 1733360571317 (+1 ms)Running coprocessor post-close hooks at 1733360571321 (+4 ms)Closed at 1733360571321 2024-12-05T01:02:51,323 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(157): Closed 0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:51,324 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=0f2d269b46975c77edbdde8d1abc928f, regionState=CLOSED 2024-12-05T01:02:51,324 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(157): Closed 3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:51,324 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=3a544c5760145e2a9f5308318c2ba2f9, regionState=CLOSED 2024-12-05T01:02:51,327 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=210, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0f2d269b46975c77edbdde8d1abc928f, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T01:02:51,328 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3a544c5760145e2a9f5308318c2ba2f9, server=2113c16e5528,44435,1733360288855 because future has completed 2024-12-05T01:02:51,330 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=209 2024-12-05T01:02:51,331 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=209, state=SUCCESS, hasLock=false; CloseRegionProcedure 0f2d269b46975c77edbdde8d1abc928f, server=2113c16e5528,41191,1733360288910 in 178 msec 2024-12-05T01:02:51,331 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=208 2024-12-05T01:02:51,331 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=208, state=SUCCESS, hasLock=false; CloseRegionProcedure 3a544c5760145e2a9f5308318c2ba2f9, server=2113c16e5528,44435,1733360288855 in 178 msec 2024-12-05T01:02:51,332 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=0f2d269b46975c77edbdde8d1abc928f, UNASSIGN in 182 msec 2024-12-05T01:02:51,333 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=208, resume processing ppid=207 2024-12-05T01:02:51,333 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a544c5760145e2a9f5308318c2ba2f9, UNASSIGN in 183 msec 2024-12-05T01:02:51,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=206 2024-12-05T01:02:51,335 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 187 msec 2024-12-05T01:02:51,337 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360571336"}]},"ts":"1733360571336"} 2024-12-05T01:02:51,338 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-05T01:02:51,338 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-05T01:02:51,341 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 197 msec 2024-12-05T01:02:51,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-05T01:02:51,462 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-05T01:02:51,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,464 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,465 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=212, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,467 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,468 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:51,468 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:51,470 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f/cf, FileablePath, 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f/recovered.edits] 2024-12-05T01:02:51,470 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9/recovered.edits] 2024-12-05T01:02:51,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,471 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-05T01:02:51,471 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-05T01:02:51,471 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-05T01:02:51,471 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-05T01:02:51,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:51,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:51,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:51,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:51,474 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f/cf/95dc09c156f04181b0732bfbdb9d0a25 to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f/cf/95dc09c156f04181b0732bfbdb9d0a25 2024-12-05T01:02:51,474 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9/cf/4d889a6ad849485495e6a669cb2b2e58 to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9/cf/4d889a6ad849485495e6a669cb2b2e58 2024-12-05T01:02:51,475 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:51,475 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:51,475 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:51,475 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:51,477 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9/recovered.edits/9.seqid 2024-12-05T01:02:51,477 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f/recovered.edits/9.seqid 2024-12-05T01:02:51,478 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/3a544c5760145e2a9f5308318c2ba2f9 2024-12-05T01:02:51,478 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testEmptyExportFileSystemState/0f2d269b46975c77edbdde8d1abc928f 2024-12-05T01:02:51,478 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-05T01:02:51,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-05T01:02:51,480 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=212, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,482 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-05T01:02:51,483 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-05T01:02:51,484 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=212, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,484 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 
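[annotation] Here the DeleteTableProcedure (pid=212) has the HFileArchiver move the store file and recovered.edits for each region under the cluster's archive directory rather than deleting them outright, and the table's ACL znode is removed, which fans out the ZooKeeper NodeDeleted events seen above. A hedged sketch of the client calls behind this teardown, including the snapshot deletions that appear a little further down in the log, is given below; only the table and snapshot names come from the log, everything else is assumed.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Requires the table to be disabled first; submits DeleteTableProcedure (pid=212),
          // which archives the region files instead of deleting them immediately.
          admin.deleteTable(table);
          // Snapshot cleanup, matching the 'delete name: ...' master RPCs further down.
          admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
          admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
        }
      }
    }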
2024-12-05T01:02:51,484 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360571484"}]},"ts":"9223372036854775807"} 2024-12-05T01:02:51,484 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360571484"}]},"ts":"9223372036854775807"} 2024-12-05T01:02:51,486 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T01:02:51,486 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 3a544c5760145e2a9f5308318c2ba2f9, NAME => 'testtb-testEmptyExportFileSystemState,,1733360561282.3a544c5760145e2a9f5308318c2ba2f9.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 0f2d269b46975c77edbdde8d1abc928f, NAME => 'testtb-testEmptyExportFileSystemState,1,1733360561282.0f2d269b46975c77edbdde8d1abc928f.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T01:02:51,486 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 2024-12-05T01:02:51,487 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733360571486"}]},"ts":"9223372036854775807"} 2024-12-05T01:02:51,488 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-05T01:02:51,489 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=212, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,490 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 26 msec 2024-12-05T01:02:51,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-05T01:02:51,582 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-12-05T01:02:51,582 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-05T01:02:51,587 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-05T01:02:51,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:51,590 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-05T01:02:51,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-05T01:02:51,612 INFO [Time-limited 
test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=811 (was 803) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6685 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45431 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:58818 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 4938) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:41694 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_687323944_1 at /127.0.0.1:38074 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:33998 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:36445 from appattempt_1733360294895_0008_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:45431 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=818 (was 785) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=533 (was 592), ProcessCount=14 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=5440 (was 5819) 2024-12-05T01:02:51,612 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-05T01:02:51,629 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=811, OpenFileDescriptor=818, MaxFileDescriptor=1048576, SystemLoadAverage=533, ProcessCount=14, AvailableMemoryMB=5440 2024-12-05T01:02:51,629 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-05T01:02:51,630 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T01:02:51,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-05T01:02:51,632 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T01:02:51,632 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:51,632 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 213 2024-12-05T01:02:51,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-05T01:02:51,633 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T01:02:51,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742279_1455 (size=404) 2024-12-05T01:02:51,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742279_1455 (size=404) 2024-12-05T01:02:51,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742279_1455 (size=404) 2024-12-05T01:02:51,641 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c0e859b293ab81cb746413a04322b9d0, NAME => 'testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:51,642 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 7c34576004741106869ac4fb0de4f831, NAME => 'testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:51,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742280_1456 (size=65) 2024-12-05T01:02:51,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742280_1456 (size=65) 2024-12-05T01:02:51,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742280_1456 (size=65) 2024-12-05T01:02:51,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742281_1457 (size=65) 2024-12-05T01:02:51,655 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:51,655 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing c0e859b293ab81cb746413a04322b9d0, disabling compactions & flushes 2024-12-05T01:02:51,655 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. 2024-12-05T01:02:51,655 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. 2024-12-05T01:02:51,655 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. after waiting 0 ms 2024-12-05T01:02:51,655 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. 2024-12-05T01:02:51,655 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. 
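[annotation] The create request above spells out the full descriptor for 'testtb-testExportWithChecksum' (one family 'cf', a single version, 64 KB block size) and the two RegionOpenAndInit workers show the table is pre-split at key '1', giving regions ''..'1' and '1'..''. A rough client-side equivalent using the 2.x builder API is sketched below; only the names, the split key and the two tuned attributes are taken from the log, the remaining attributes are left at their defaults and the connection setup is assumed.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateChecksumTableSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportWithChecksum");
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)        // VERSIONS => '1'
                .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536 B (64KB)'
                .build())
            .build();
        byte[][] splitKeys = { Bytes.toBytes("1") }; // yields regions ''..'1' and '1'..''
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Submits CreateTableProcedure (pid=213 in the log) and waits for the regions to open.
          admin.createTable(desc, splitKeys);
        }
      }
    }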
2024-12-05T01:02:51,655 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for c0e859b293ab81cb746413a04322b9d0: Waiting for close lock at 1733360571655Disabling compacts and flushes for region at 1733360571655Disabling writes for close at 1733360571655Writing region close event to WAL at 1733360571655Closed at 1733360571655 2024-12-05T01:02:51,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742281_1457 (size=65) 2024-12-05T01:02:51,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742281_1457 (size=65) 2024-12-05T01:02:51,656 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:51,656 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing 7c34576004741106869ac4fb0de4f831, disabling compactions & flushes 2024-12-05T01:02:51,656 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. 2024-12-05T01:02:51,656 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. 2024-12-05T01:02:51,656 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. after waiting 0 ms 2024-12-05T01:02:51,656 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. 2024-12-05T01:02:51,656 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. 
2024-12-05T01:02:51,656 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for 7c34576004741106869ac4fb0de4f831: Waiting for close lock at 1733360571656Disabling compacts and flushes for region at 1733360571656Disabling writes for close at 1733360571656Writing region close event to WAL at 1733360571656Closed at 1733360571656 2024-12-05T01:02:51,657 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T01:02:51,657 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733360571657"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360571657"}]},"ts":"1733360571657"} 2024-12-05T01:02:51,657 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733360571657"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360571657"}]},"ts":"1733360571657"} 2024-12-05T01:02:51,660 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T01:02:51,660 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T01:02:51,660 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360571660"}]},"ts":"1733360571660"} 2024-12-05T01:02:51,662 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-05T01:02:51,662 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {2113c16e5528=0} racks are {/default-rack=0} 2024-12-05T01:02:51,663 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T01:02:51,663 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T01:02:51,663 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T01:02:51,663 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T01:02:51,663 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T01:02:51,663 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T01:02:51,663 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T01:02:51,663 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T01:02:51,663 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T01:02:51,663 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T01:02:51,663 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=c0e859b293ab81cb746413a04322b9d0, ASSIGN}, {pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7c34576004741106869ac4fb0de4f831, ASSIGN}] 2024-12-05T01:02:51,664 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7c34576004741106869ac4fb0de4f831, ASSIGN 2024-12-05T01:02:51,664 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=c0e859b293ab81cb746413a04322b9d0, ASSIGN 2024-12-05T01:02:51,665 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7c34576004741106869ac4fb0de4f831, ASSIGN; state=OFFLINE, location=2113c16e5528,44435,1733360288855; forceNewPlan=false, retain=false 2024-12-05T01:02:51,665 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=c0e859b293ab81cb746413a04322b9d0, ASSIGN; state=OFFLINE, location=2113c16e5528,32927,1733360288712; forceNewPlan=false, retain=false 2024-12-05T01:02:51,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-05T01:02:51,815 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
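[annotation] At this point the balancer has chosen a server for each of the two new regions and the master has queued the ASSIGN TransitRegionStateProcedures (pid=214/215); the OPENING states and OpenRegionProcedures that follow are the result. Purely as an illustration, a small sketch of how a client could confirm the resulting placement is shown here; the table name is from the log, the rest is assumed.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionPlacementSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportWithChecksum");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(table)) {
          // Prints each region of the table with the region server hosting it,
          // i.e. the outcome of the ASSIGN procedures queued above.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }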
2024-12-05T01:02:51,816 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=c0e859b293ab81cb746413a04322b9d0, regionState=OPENING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:02:51,816 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=7c34576004741106869ac4fb0de4f831, regionState=OPENING, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:02:51,817 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=c0e859b293ab81cb746413a04322b9d0, ASSIGN because future has completed 2024-12-05T01:02:51,818 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=216, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure c0e859b293ab81cb746413a04322b9d0, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:02:51,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7c34576004741106869ac4fb0de4f831, ASSIGN because future has completed 2024-12-05T01:02:51,818 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7c34576004741106869ac4fb0de4f831, server=2113c16e5528,44435,1733360288855}] 2024-12-05T01:02:51,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-05T01:02:51,973 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. 2024-12-05T01:02:51,973 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7752): Opening region: {ENCODED => c0e859b293ab81cb746413a04322b9d0, NAME => 'testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T01:02:51,973 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. 2024-12-05T01:02:51,973 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7752): Opening region: {ENCODED => 7c34576004741106869ac4fb0de4f831, NAME => 'testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T01:02:51,973 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. service=AccessControlService 2024-12-05T01:02:51,973 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. 
service=AccessControlService 2024-12-05T01:02:51,974 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T01:02:51,974 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T01:02:51,974 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 7c34576004741106869ac4fb0de4f831 2024-12-05T01:02:51,974 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:02:51,974 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:51,974 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:02:51,974 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7794): checking encryption for 7c34576004741106869ac4fb0de4f831 2024-12-05T01:02:51,974 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7794): checking encryption for c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:02:51,974 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7797): checking classloading for 7c34576004741106869ac4fb0de4f831 2024-12-05T01:02:51,974 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7797): checking classloading for c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:02:51,975 INFO [StoreOpener-7c34576004741106869ac4fb0de4f831-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7c34576004741106869ac4fb0de4f831 2024-12-05T01:02:51,975 INFO [StoreOpener-c0e859b293ab81cb746413a04322b9d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:02:51,976 INFO [StoreOpener-c0e859b293ab81cb746413a04322b9d0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c0e859b293ab81cb746413a04322b9d0 columnFamilyName cf 2024-12-05T01:02:51,976 INFO [StoreOpener-7c34576004741106869ac4fb0de4f831-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c34576004741106869ac4fb0de4f831 columnFamilyName cf 2024-12-05T01:02:51,976 DEBUG [StoreOpener-c0e859b293ab81cb746413a04322b9d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:51,976 DEBUG [StoreOpener-7c34576004741106869ac4fb0de4f831-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:02:51,977 INFO [StoreOpener-c0e859b293ab81cb746413a04322b9d0-1 {}] regionserver.HStore(327): Store=c0e859b293ab81cb746413a04322b9d0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:02:51,977 INFO [StoreOpener-7c34576004741106869ac4fb0de4f831-1 {}] regionserver.HStore(327): Store=7c34576004741106869ac4fb0de4f831/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:02:51,977 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1038): replaying wal for 7c34576004741106869ac4fb0de4f831 2024-12-05T01:02:51,977 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1038): replaying wal for c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:02:51,977 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831 2024-12-05T01:02:51,977 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:02:51,978 DEBUG 
[RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831 2024-12-05T01:02:51,978 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:02:51,978 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1048): stopping wal replay for 7c34576004741106869ac4fb0de4f831 2024-12-05T01:02:51,978 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1060): Cleaning up temporary data for 7c34576004741106869ac4fb0de4f831 2024-12-05T01:02:51,978 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1048): stopping wal replay for c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:02:51,978 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1060): Cleaning up temporary data for c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:02:51,979 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1093): writing seq id for 7c34576004741106869ac4fb0de4f831 2024-12-05T01:02:51,979 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1093): writing seq id for c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:02:51,981 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:02:51,981 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:02:51,981 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1114): Opened c0e859b293ab81cb746413a04322b9d0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61243464, jitterRate=-0.08740127086639404}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:02:51,981 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1114): Opened 7c34576004741106869ac4fb0de4f831; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73750851, jitterRate=0.09897331893444061}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:02:51,981 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, 
pid=217}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7c34576004741106869ac4fb0de4f831 2024-12-05T01:02:51,981 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:02:51,982 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1006): Region open journal for 7c34576004741106869ac4fb0de4f831: Running coprocessor pre-open hook at 1733360571974Writing region info on filesystem at 1733360571974Initializing all the Stores at 1733360571975 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360571975Cleaning up temporary data from old regions at 1733360571978 (+3 ms)Running coprocessor post-open hooks at 1733360571981 (+3 ms)Region opened successfully at 1733360571982 (+1 ms) 2024-12-05T01:02:51,982 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1006): Region open journal for c0e859b293ab81cb746413a04322b9d0: Running coprocessor pre-open hook at 1733360571974Writing region info on filesystem at 1733360571974Initializing all the Stores at 1733360571975 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360571975Cleaning up temporary data from old regions at 1733360571978 (+3 ms)Running coprocessor post-open hooks at 1733360571981 (+3 ms)Region opened successfully at 1733360571982 (+1 ms) 2024-12-05T01:02:51,983 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831., pid=217, masterSystemTime=1733360571970 2024-12-05T01:02:51,983 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0., pid=216, masterSystemTime=1733360571969 2024-12-05T01:02:51,984 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. 2024-12-05T01:02:51,984 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. 
2024-12-05T01:02:51,985 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=c0e859b293ab81cb746413a04322b9d0, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:02:51,985 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. 2024-12-05T01:02:51,985 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. 2024-12-05T01:02:51,985 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=7c34576004741106869ac4fb0de4f831, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:02:51,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=216, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure c0e859b293ab81cb746413a04322b9d0, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:02:51,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7c34576004741106869ac4fb0de4f831, server=2113c16e5528,44435,1733360288855 because future has completed 2024-12-05T01:02:51,989 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=216, resume processing ppid=214 2024-12-05T01:02:51,989 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, ppid=214, state=SUCCESS, hasLock=false; OpenRegionProcedure c0e859b293ab81cb746413a04322b9d0, server=2113c16e5528,32927,1733360288712 in 169 msec 2024-12-05T01:02:51,990 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=217, resume processing ppid=215 2024-12-05T01:02:51,990 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=215, state=SUCCESS, hasLock=false; OpenRegionProcedure 7c34576004741106869ac4fb0de4f831, server=2113c16e5528,44435,1733360288855 in 170 msec 2024-12-05T01:02:51,990 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=c0e859b293ab81cb746413a04322b9d0, ASSIGN in 326 msec 2024-12-05T01:02:51,991 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=215, resume processing ppid=213 2024-12-05T01:02:51,991 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7c34576004741106869ac4fb0de4f831, ASSIGN in 327 msec 2024-12-05T01:02:51,992 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T01:02:51,992 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360571992"}]},"ts":"1733360571992"} 2024-12-05T01:02:51,993 INFO 
[PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-05T01:02:51,994 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T01:02:51,994 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-05T01:02:51,996 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-05T01:02:51,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:51,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:51,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:51,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:02:52,001 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:52,002 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:52,002 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:52,002 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:52,002 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:52,002 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:52,002 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:52,002 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-05T01:02:52,003 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 371 msec 2024-12-05T01:02:52,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-05T01:02:52,261 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-05T01:02:52,261 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms 2024-12-05T01:02:52,262 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:02:52,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32927 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32829 bytes) of info 2024-12-05T01:02:52,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-12-05T01:02:52,266 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:02:52,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithChecksum assigned. 2024-12-05T01:02:52,266 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-05T01:02:52,269 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-05T01:02:52,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360572269 (current time:1733360572269). 
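The snapshot request logged above ({ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }) corresponds to an Admin-level snapshot call on the client side. A minimal sketch using the public client API rather than the test harness, with the Admin instance assumed to come from an existing Connection:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeEmptySnapshot {
  static void takeSnapshot(Admin admin) throws IOException {
    // FLUSH-type snapshot: regions are flushed first (a no-op here, the table is
    // still empty) and the manifest is written under .hbase-snapshot/.tmp before
    // being moved into place, as the SnapshotProcedure states below show.
    admin.snapshot(new SnapshotDescription(
        "emptySnaptb0-testExportWithChecksum",
        TableName.valueOf("testtb-testExportWithChecksum"),
        SnapshotType.FLUSH));
  }
}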
2024-12-05T01:02:52,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:02:52,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-05T01:02:52,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:02:52,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bc4d69d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:52,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:52,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:52,270 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:52,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:52,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:52,271 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c6007d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:52,271 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:52,271 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:52,271 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:52,272 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39886, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:52,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11d2b4cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:52,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:52,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:52,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:52,274 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36170, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:02:52,275 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 2024-12-05T01:02:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:52,275 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T01:02:52,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bb2b97a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:52,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:52,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:52,276 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:52,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:52,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:52,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@325d973c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:52,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:52,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:52,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:52,277 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39908, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:52,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7450a218, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:52,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:52,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:52,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:52,279 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36178, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-05T01:02:52,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:02:52,281 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369. 2024-12-05T01:02:52,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:52,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:52,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:52,281 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
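The PermissionStorage and ZKPermissionWatcher traffic above is the ACL machinery at work: the table owner's permission (jenkins: RWXCA) is persisted in hbase:acl, pushed to every region server through the /hbase/acl znode, and then read back by writeAclToSnapshotDescription so the snapshot carries the table's ACLs. For reference only, a table-level grant producing an entry of that shape can be issued through AccessControlClient; this is a hedged sketch of the public API, not something the test does explicitly (the CREATE path grants the owner automatically).

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermission {
  static void grantAll(Connection conn) throws Throwable {
    // Equivalent to the "jenkins: RWXCA" entry read back from hbase:acl above:
    // READ, WRITE, EXEC, CREATE, ADMIN on the whole table (family and qualifier null).
    AccessControlClient.grant(conn,
        TableName.valueOf("testtb-testExportWithChecksum"),
        "jenkins",
        null, null,
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}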
2024-12-05T01:02:52,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-05T01:02:52,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T01:02:52,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-05T01:02:52,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-05T01:02:52,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-05T01:02:52,284 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:02:52,285 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:02:52,286 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:02:52,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742282_1458 (size=161) 2024-12-05T01:02:52,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742282_1458 (size=161) 2024-12-05T01:02:52,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742282_1458 (size=161) 2024-12-05T01:02:52,292 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:02:52,293 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c0e859b293ab81cb746413a04322b9d0}, {pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c34576004741106869ac4fb0de4f831}] 2024-12-05T01:02:52,294 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, 
ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c34576004741106869ac4fb0de4f831 2024-12-05T01:02:52,294 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:02:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-05T01:02:52,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44435 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=220 2024-12-05T01:02:52,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32927 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=219 2024-12-05T01:02:52,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. 2024-12-05T01:02:52,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. 2024-12-05T01:02:52,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.HRegion(2603): Flush status journal for 7c34576004741106869ac4fb0de4f831: 2024-12-05T01:02:52,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.HRegion(2603): Flush status journal for c0e859b293ab81cb746413a04322b9d0: 2024-12-05T01:02:52,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. for emptySnaptb0-testExportWithChecksum completed. 2024-12-05T01:02:52,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. for emptySnaptb0-testExportWithChecksum completed. 2024-12-05T01:02:52,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-05T01:02:52,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-05T01:02:52,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:52,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:52,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:02:52,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:02:52,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742283_1459 (size=68) 2024-12-05T01:02:52,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742283_1459 (size=68) 2024-12-05T01:02:52,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742284_1460 (size=68) 2024-12-05T01:02:52,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742283_1459 (size=68) 2024-12-05T01:02:52,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742284_1460 (size=68) 2024-12-05T01:02:52,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742284_1460 (size=68) 2024-12-05T01:02:52,452 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. 2024-12-05T01:02:52,453 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=219 2024-12-05T01:02:52,453 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. 
2024-12-05T01:02:52,453 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=220 2024-12-05T01:02:52,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=219 2024-12-05T01:02:52,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=220 2024-12-05T01:02:52,453 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:02:52,453 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 7c34576004741106869ac4fb0de4f831 2024-12-05T01:02:52,453 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c34576004741106869ac4fb0de4f831 2024-12-05T01:02:52,453 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:02:52,455 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7c34576004741106869ac4fb0de4f831 in 161 msec 2024-12-05T01:02:52,456 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=219, resume processing ppid=218 2024-12-05T01:02:52,456 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c0e859b293ab81cb746413a04322b9d0 in 161 msec 2024-12-05T01:02:52,456 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:02:52,456 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:02:52,457 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:02:52,457 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-05T01:02:52,457 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-05T01:02:52,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:41667 is added to blk_1073742285_1461 (size=543) 2024-12-05T01:02:52,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742285_1461 (size=543) 2024-12-05T01:02:52,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742285_1461 (size=543) 2024-12-05T01:02:52,468 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:02:52,471 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:02:52,472 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-05T01:02:52,473 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:02:52,473 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-05T01:02:52,474 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 191 msec 2024-12-05T01:02:52,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-05T01:02:52,602 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-05T01:02:52,605 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='09c27c9e3da9778fbba7312ad27d91836', locateType=CURRENT is [region=testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:02:52,606 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='142c3f9cd321204cbaa01e6592c6ab726', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831., hostname=2113c16e5528,44435,1733360288855, seqNum=2] 
2024-12-05T01:02:52,607 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='2d0bf8b329dc24a4a96ba850f230067fa', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831., hostname=2113c16e5528,44435,1733360288855, seqNum=2] 2024-12-05T01:02:52,608 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='367cae16da375d5457faa3cc9d4608d39', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831., hostname=2113c16e5528,44435,1733360288855, seqNum=2] 2024-12-05T01:02:52,609 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='42785703b60df1ad102b3619f18ed4362', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831., hostname=2113c16e5528,44435,1733360288855, seqNum=2] 2024-12-05T01:02:52,611 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:02:52,613 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44435 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:02:52,614 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-05T01:02:52,616 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-05T01:02:52,616 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. 2024-12-05T01:02:52,616 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:02:52,617 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-05T01:02:52,622 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-05T01:02:52,627 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-05T01:02:52,629 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-05T01:02:52,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360572629 (current time:1733360572629). 
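The two HRegion(8528) messages above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") are emitted when mutations arrive with SKIP_WAL durability, which is how the test loads its rows quickly before taking the second snapshot. A minimal client-side sketch that would trigger the same message; table setup is assumed and the column qualifier and value are illustrative only:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LoadWithoutWal {
  static void putSkipWal(Connection conn, byte[] row) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("testtb-testExportWithChecksum"))) {
      Put put = new Put(row)
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL trades durability for load speed; the region server logs the
      // "Data may be lost in the event of a crash" notice seen above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}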
2024-12-05T01:02:52,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:02:52,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-05T01:02:52,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:02:52,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52806d5d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:52,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:52,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:52,631 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:52,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:52,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:52,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22e2c9da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:52,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:52,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:52,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:52,633 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39926, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:52,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@606c4265, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:52,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:52,634 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:52,634 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:52,635 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36190, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:02:52,636 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 2024-12-05T01:02:52,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:52,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:52,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:52,637 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
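Note: the ClusterIdFetcher / ConnectionRegistryService / meta-location exchanges above are the normal handshake performed whenever a new (async) connection is opened; they are not part of the snapshot itself. A rough sketch of what triggers them, assuming the async client API (the snapshot call is the same operation the RawAsyncHBaseAdmin lines report on):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Opening the connection performs the registry handshake logged above:
        // fetch the cluster id, then the hbase:meta region location.
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
          AsyncAdmin admin = conn.getAdmin();
          // The future completes when the master-side snapshot procedure finishes.
          admin.snapshot("snaptb0-testExportWithChecksum",
              TableName.valueOf("testtb-testExportWithChecksum")).get();
        }
      }
    }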
2024-12-05T01:02:52,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27337736, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:52,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:02:52,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:02:52,639 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:02:52,639 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:02:52,639 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:02:52,639 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11a73a48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:52,639 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:02:52,639 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:02:52,640 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:52,640 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39938, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:02:52,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27b01bd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:02:52,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:02:52,642 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1] 2024-12-05T01:02:52,642 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:02:52,644 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36198, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-05T01:02:52,646 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:02:52,647 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 2024-12-05T01:02:52,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:02:52,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:52,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:02:52,648 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
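Note: the call stack above is diagnostic, not an error: writeAclToSnapshotDescription opens a short-lived connection to read hbase:acl so the table's permissions travel with the snapshot. A hedged sketch of how an ACL entry such as the "jenkins: RWXCA" one read next would have been granted in the first place, assuming the AccessController coprocessor is enabled on the cluster:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantAclSketch {
      static void grantAll(Connection conn) throws Throwable {
        // Grants R/W/X/C/A on the whole table to user "jenkins"; the entry is
        // stored in hbase:acl and later copied into the snapshot description.
        AccessControlClient.grant(conn,
            TableName.valueOf("testtb-testExportWithChecksum"), "jenkins",
            null, null,
            Permission.Action.READ, Permission.Action.WRITE,
            Permission.Action.EXEC, Permission.Action.CREATE,
            Permission.Action.ADMIN);
      }
    }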
2024-12-05T01:02:52,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-05T01:02:52,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T01:02:52,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-05T01:02:52,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-05T01:02:52,650 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:02:52,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-05T01:02:52,652 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:02:52,654 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:02:52,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742286_1462 (size=156) 2024-12-05T01:02:52,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742286_1462 (size=156) 2024-12-05T01:02:52,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742286_1462 (size=156) 2024-12-05T01:02:52,664 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:02:52,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c0e859b293ab81cb746413a04322b9d0}, {pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c34576004741106869ac4fb0de4f831}] 2024-12-05T01:02:52,665 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:02:52,665 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c34576004741106869ac4fb0de4f831 2024-12-05T01:02:52,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-05T01:02:52,817 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44435 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=223 2024-12-05T01:02:52,817 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32927 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=222 2024-12-05T01:02:52,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. 2024-12-05T01:02:52,817 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2902): Flushing 7c34576004741106869ac4fb0de4f831 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-05T01:02:52,818 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. 2024-12-05T01:02:52,818 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2902): Flushing c0e859b293ab81cb746413a04322b9d0 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-05T01:02:52,835 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/.tmp/cf/a44df0633134492db404d4b377f4a10b is 71, key is 007d6d13d8a7ff32be4dfac4e0b64400/cf:q/1733360572611/Put/seqid=0 2024-12-05T01:02:52,836 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/.tmp/cf/423add9b8a414df1b2aeee16ff1e1847 is 71, key is 12b7276e01d4bdb7a0cf7851e73ac08b/cf:q/1733360572613/Put/seqid=0 2024-12-05T01:02:52,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742287_1463 (size=5356) 2024-12-05T01:02:52,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742287_1463 (size=5356) 2024-12-05T01:02:52,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742288_1464 (size=8256) 2024-12-05T01:02:52,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is 
added to blk_1073742288_1464 (size=8256) 2024-12-05T01:02:52,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742287_1463 (size=5356) 2024-12-05T01:02:52,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742288_1464 (size=8256) 2024-12-05T01:02:52,849 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/.tmp/cf/a44df0633134492db404d4b377f4a10b 2024-12-05T01:02:52,849 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/.tmp/cf/423add9b8a414df1b2aeee16ff1e1847 2024-12-05T01:02:52,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/.tmp/cf/a44df0633134492db404d4b377f4a10b as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/cf/a44df0633134492db404d4b377f4a10b 2024-12-05T01:02:52,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/.tmp/cf/423add9b8a414df1b2aeee16ff1e1847 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/cf/423add9b8a414df1b2aeee16ff1e1847 2024-12-05T01:02:52,860 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/cf/a44df0633134492db404d4b377f4a10b, entries=4, sequenceid=6, filesize=5.2 K 2024-12-05T01:02:52,860 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/cf/423add9b8a414df1b2aeee16ff1e1847, entries=46, sequenceid=6, filesize=8.1 K 2024-12-05T01:02:52,861 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for c0e859b293ab81cb746413a04322b9d0 in 43ms, sequenceid=6, compaction 
requested=false 2024-12-05T01:02:52,861 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 7c34576004741106869ac4fb0de4f831 in 44ms, sequenceid=6, compaction requested=false 2024-12-05T01:02:52,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-05T01:02:52,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-05T01:02:52,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2603): Flush status journal for c0e859b293ab81cb746413a04322b9d0: 2024-12-05T01:02:52,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2603): Flush status journal for 7c34576004741106869ac4fb0de4f831: 2024-12-05T01:02:52,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. for snaptb0-testExportWithChecksum completed. 2024-12-05T01:02:52,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. for snaptb0-testExportWithChecksum completed. 2024-12-05T01:02:52,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-05T01:02:52,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-05T01:02:52,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:52,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:02:52,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/cf/a44df0633134492db404d4b377f4a10b] hfiles 2024-12-05T01:02:52,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/cf/423add9b8a414df1b2aeee16ff1e1847] hfiles 2024-12-05T01:02:52,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/cf/a44df0633134492db404d4b377f4a10b for snapshot=snaptb0-testExportWithChecksum 2024-12-05T01:02:52,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/cf/423add9b8a414df1b2aeee16ff1e1847 for snapshot=snaptb0-testExportWithChecksum 2024-12-05T01:02:52,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742289_1465 (size=107) 2024-12-05T01:02:52,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742289_1465 (size=107) 2024-12-05T01:02:52,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742289_1465 (size=107) 2024-12-05T01:02:52,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742290_1466 (size=107) 2024-12-05T01:02:52,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742290_1466 (size=107) 2024-12-05T01:02:52,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. 
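Note: the per-region flushes above are what make a FLUSH-type snapshot possible: each memstore is written to an HFile under .tmp, committed into the column-family directory, and only then referenced by the snapshot manifest. The same flush can be triggered by hand; a minimal sketch with illustrative wiring:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class ManualFlushSketch {
      static void flushTable(Admin admin) throws IOException {
        // Forces every region of the table to write its memstore to an HFile,
        // just as the SnapshotRegionCallable does before taking references.
        admin.flush(TableName.valueOf("testtb-testExportWithChecksum"));
      }
    }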
2024-12-05T01:02:52,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=223 2024-12-05T01:02:52,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=223 2024-12-05T01:02:52,876 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 7c34576004741106869ac4fb0de4f831 2024-12-05T01:02:52,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742290_1466 (size=107) 2024-12-05T01:02:52,876 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c34576004741106869ac4fb0de4f831 2024-12-05T01:02:52,876 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. 2024-12-05T01:02:52,876 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=222 2024-12-05T01:02:52,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=222 2024-12-05T01:02:52,877 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:02:52,877 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:02:52,879 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7c34576004741106869ac4fb0de4f831 in 213 msec 2024-12-05T01:02:52,879 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=222, resume processing ppid=221 2024-12-05T01:02:52,880 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c0e859b293ab81cb746413a04322b9d0 in 214 msec 2024-12-05T01:02:52,880 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:02:52,880 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:02:52,881 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ 
ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:02:52,881 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-05T01:02:52,882 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-05T01:02:52,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742291_1467 (size=621) 2024-12-05T01:02:52,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742291_1467 (size=621) 2024-12-05T01:02:52,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742291_1467 (size=621) 2024-12-05T01:02:52,892 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:02:52,896 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:02:52,896 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-05T01:02:52,898 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:02:52,898 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-05T01:02:52,899 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 249 msec 2024-12-05T01:02:52,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-05T01:02:52,972 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-05T01:02:52,972 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360572972 2024-12-05T01:02:52,972 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360572972, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360572972, srcFsUri=hdfs://localhost:37149, srcDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:53,000 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37149, inputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:02:53,000 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@5a301e32, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360572972, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360572972/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-05T01:02:53,002 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T01:02:53,005 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360572972/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-05T01:02:53,027 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:53,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:53,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:53,807 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:02:54,088 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-3536344276913142081.jar 2024-12-05T01:02:54,089 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:54,089 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:54,155 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-14057233960851199035.jar 2024-12-05T01:02:54,156 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:54,156 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:54,156 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:54,157 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:54,157 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:54,157 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:02:54,157 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T01:02:54,157 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T01:02:54,158 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T01:02:54,158 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T01:02:54,158 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T01:02:54,158 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T01:02:54,159 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T01:02:54,159 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T01:02:54,159 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T01:02:54,159 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T01:02:54,159 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T01:02:54,160 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:02:54,160 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:02:54,160 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:02:54,160 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:02:54,161 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:02:54,161 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:02:54,161 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:02:54,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742292_1468 (size=24020) 2024-12-05T01:02:54,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742292_1468 (size=24020) 2024-12-05T01:02:54,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742292_1468 (size=24020) 2024-12-05T01:02:54,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742293_1469 (size=77755) 2024-12-05T01:02:54,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742293_1469 (size=77755) 2024-12-05T01:02:54,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742293_1469 (size=77755) 2024-12-05T01:02:54,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742294_1470 (size=6424744) 2024-12-05T01:02:54,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742294_1470 (size=6424744) 2024-12-05T01:02:54,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742294_1470 (size=6424744) 2024-12-05T01:02:54,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742295_1471 (size=131360) 2024-12-05T01:02:54,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742295_1471 (size=131360) 2024-12-05T01:02:54,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742295_1471 (size=131360) 2024-12-05T01:02:54,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742296_1472 (size=111793) 2024-12-05T01:02:54,306 INFO [Block report processor 
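Note: the export being prepared here copies the snapshot manifest and its hfiles out of the source HDFS into a local file:/// destination. From user code this is the ExportSnapshot tool; a hedged sketch of an equivalent invocation (the destination path is a placeholder, not the Jenkins workspace path above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent to:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     --snapshot snaptb0-testExportWithChecksum --copy-to file:///tmp/local-export
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "snaptb0-testExportWithChecksum",
            "--copy-to", "file:///tmp/local-export"});
        System.exit(rc);
      }
    }

When the target is a checksum-less filesystem such as the local one, the exported files are verified by re-reading them, which is the scenario this test exercises.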
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742296_1472 (size=111793) 2024-12-05T01:02:54,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742296_1472 (size=111793) 2024-12-05T01:02:54,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742297_1473 (size=1832290) 2024-12-05T01:02:54,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742297_1473 (size=1832290) 2024-12-05T01:02:54,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742297_1473 (size=1832290) 2024-12-05T01:02:54,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742298_1474 (size=443171) 2024-12-05T01:02:54,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742298_1474 (size=443171) 2024-12-05T01:02:54,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742298_1474 (size=443171) 2024-12-05T01:02:54,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742299_1475 (size=8360282) 2024-12-05T01:02:54,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742299_1475 (size=8360282) 2024-12-05T01:02:54,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742299_1475 (size=8360282) 2024-12-05T01:02:54,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742300_1476 (size=503880) 2024-12-05T01:02:54,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742300_1476 (size=503880) 2024-12-05T01:02:54,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742300_1476 (size=503880) 2024-12-05T01:02:54,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742301_1477 (size=322274) 2024-12-05T01:02:54,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742301_1477 (size=322274) 2024-12-05T01:02:54,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742301_1477 (size=322274) 2024-12-05T01:02:54,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742302_1478 (size=20406) 2024-12-05T01:02:54,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742302_1478 (size=20406) 2024-12-05T01:02:54,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742302_1478 (size=20406) 2024-12-05T01:02:54,439 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742303_1479 (size=45609) 2024-12-05T01:02:54,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742303_1479 (size=45609) 2024-12-05T01:02:54,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742303_1479 (size=45609) 2024-12-05T01:02:54,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742304_1480 (size=136454) 2024-12-05T01:02:54,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742304_1480 (size=136454) 2024-12-05T01:02:54,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742304_1480 (size=136454) 2024-12-05T01:02:54,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742305_1481 (size=1597136) 2024-12-05T01:02:54,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742305_1481 (size=1597136) 2024-12-05T01:02:54,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742305_1481 (size=1597136) 2024-12-05T01:02:54,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742306_1482 (size=30873) 2024-12-05T01:02:54,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742306_1482 (size=30873) 2024-12-05T01:02:54,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742306_1482 (size=30873) 2024-12-05T01:02:54,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742307_1483 (size=29229) 2024-12-05T01:02:54,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742307_1483 (size=29229) 2024-12-05T01:02:54,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742307_1483 (size=29229) 2024-12-05T01:02:54,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742308_1484 (size=903858) 2024-12-05T01:02:54,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742308_1484 (size=903858) 2024-12-05T01:02:54,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742308_1484 (size=903858) 2024-12-05T01:02:54,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742309_1485 (size=5175431) 2024-12-05T01:02:54,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742309_1485 (size=5175431) 2024-12-05T01:02:54,514 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742309_1485 (size=5175431) 2024-12-05T01:02:54,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742310_1486 (size=232881) 2024-12-05T01:02:54,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742310_1486 (size=232881) 2024-12-05T01:02:54,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742310_1486 (size=232881) 2024-12-05T01:02:54,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742311_1487 (size=1323991) 2024-12-05T01:02:54,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742311_1487 (size=1323991) 2024-12-05T01:02:54,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742311_1487 (size=1323991) 2024-12-05T01:02:54,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742312_1488 (size=4695811) 2024-12-05T01:02:54,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742312_1488 (size=4695811) 2024-12-05T01:02:54,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742312_1488 (size=4695811) 2024-12-05T01:02:55,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742313_1489 (size=1877034) 2024-12-05T01:02:55,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742313_1489 (size=1877034) 2024-12-05T01:02:55,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742313_1489 (size=1877034) 2024-12-05T01:02:55,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742314_1490 (size=217555) 2024-12-05T01:02:55,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742314_1490 (size=217555) 2024-12-05T01:02:55,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742314_1490 (size=217555) 2024-12-05T01:02:55,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742315_1491 (size=4188619) 2024-12-05T01:02:55,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742315_1491 (size=4188619) 2024-12-05T01:02:55,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742315_1491 (size=4188619) 2024-12-05T01:02:55,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742316_1492 (size=127628) 
2024-12-05T01:02:55,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742316_1492 (size=127628) 2024-12-05T01:02:55,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742316_1492 (size=127628) 2024-12-05T01:02:55,088 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-05T01:02:55,090 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-05T01:02:55,092 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-05T01:02:55,092 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-05T01:02:55,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742317_1493 (size=441) 2024-12-05T01:02:55,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742317_1493 (size=441) 2024-12-05T01:02:55,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742317_1493 (size=441) 2024-12-05T01:02:55,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742318_1494 (size=21) 2024-12-05T01:02:55,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742318_1494 (size=21) 2024-12-05T01:02:55,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742318_1494 (size=21) 2024-12-05T01:02:55,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742319_1495 (size=304129) 2024-12-05T01:02:55,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742319_1495 (size=304129) 2024-12-05T01:02:55,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742319_1495 (size=304129) 2024-12-05T01:02:55,663 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T01:02:55,663 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T01:02:55,668 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0008_000001 (auth:SIMPLE) from 127.0.0.1:35290 2024-12-05T01:02:55,679 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0008/container_1733360294895_0008_01_000001/launch_container.sh] 2024-12-05T01:02:55,679 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0008/container_1733360294895_0008_01_000001/container_tokens] 2024-12-05T01:02:55,679 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0008/container_1733360294895_0008_01_000001/sysfs] 2024-12-05T01:02:55,889 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0009_000001 (auth:SIMPLE) from 127.0.0.1:43524 2024-12-05T01:02:58,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-05T01:02:58,304 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-05T01:02:58,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-05T01:03:01,057 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0009_000001 (auth:SIMPLE) from 127.0.0.1:45240 2024-12-05T01:03:01,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742320_1496 (size=349827) 2024-12-05T01:03:01,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742320_1496 (size=349827) 2024-12-05T01:03:01,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742320_1496 (size=349827) 2024-12-05T01:03:03,290 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0009_000001 (auth:SIMPLE) from 127.0.0.1:39136 2024-12-05T01:03:03,290 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0009_000001 (auth:SIMPLE) from 127.0.0.1:37960 2024-12-05T01:03:03,807 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot 
locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:03:06,698 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T01:03:06,978 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_3/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000003/launch_container.sh] 2024-12-05T01:03:06,978 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_3/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000003/container_tokens] 2024-12-05T01:03:06,978 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_3/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/cf/a44df0633134492db404d4b377f4a10b and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360572972/archive/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/cf/a44df0633134492db404d4b377f4a10b. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601)
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337)
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259)
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183)
	at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
	at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800)
	at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348)
	at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172)
2024-12-05T01:03:07,644 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_1/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000002/launch_container.sh]
2024-12-05T01:03:07,644 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_1/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000002/container_tokens]
2024-12-05T01:03:07,644 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_1/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000002/sysfs]
2024-12-05T01:03:08,158 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0009_000001 (auth:SIMPLE) from 127.0.0.1:39148
Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/cf/423add9b8a414df1b2aeee16ff1e1847 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360572972/archive/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/cf/423add9b8a414df1b2aeee16ff1e1847. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.)
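The two "Checksum mismatch" failures above are raised by ExportSnapshot's post-copy verification (verifyCopyResult): the source is HDFS while the export target is a local file: URI, so the per-file checksums of the two filesystems are not comparable. The error text itself names the available workarounds. A minimal command-line sketch, for illustration only (the snapshot name is taken from this log, the destination path is a placeholder, and exact option handling may vary by HBase version):

	# file-level CRC comparison that works across filesystems/block sizes,
	# as suggested by the error message
	hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
	    -Ddfs.checksum.combine.mode=COMPOSITE_CRC \
	    -snapshot snaptb0-testExportWithChecksum \
	    -copy-to file:///path/to/local-export

	# or skip verification entirely (risks masking corruption during transfer)
	hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
	    -snapshot snaptb0-testExportWithChecksum \
	    -copy-to file:///path/to/local-export \
	    -no-checksum-verify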
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601)
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337)
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259)
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183)
	at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
	at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800)
	at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348)
	at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172)
2024-12-05T01:03:08,811 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=920.55 KB, freeSize=879.10 MB, max=880 MB, blockCount=5, accesses=7, hits=2, hitRatio=28.57%, , cachingAccesses=7, cachingHits=2, cachingHitsRatio=28.57%, evictions=29, evicted=0, evictedPerRun=0.0
2024-12-05T01:03:08,879 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0
2024-12-05T01:03:08,931 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0
2024-12-05T01:03:09,012 DEBUG [master/2113c16e5528:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=22, reuseRatio=68.75%
2024-12-05T01:03:09,012 DEBUG [master/2113c16e5528:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
2024-12-05T01:03:09,163 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0009_000001 (auth:SIMPLE) from 127.0.0.1:39150
2024-12-05T01:03:10,481 INFO [regionserver/2113c16e5528:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B
2024-12-05T01:03:10,496 INFO [regionserver/2113c16e5528:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B
2024-12-05T01:03:10,497 INFO [regionserver/2113c16e5528:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B
2024-12-05T01:03:10,962 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733360294895_0009_01_000006 while processing FINISH_CONTAINERS event
2024-12-05T01:03:11,431 INFO [regionserver/2113c16e5528:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of
hbase:meta,,1.1588230740 because 1588230740/ns has an old edit so flush to free WALs after random delay 43918 ms 2024-12-05T01:03:11,484 DEBUG [master/2113c16e5528:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T01:03:11,485 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 8bbf90773c039aafe370b8325f074c96 changed from -1.0 to 0.0, refreshing cache 2024-12-05T01:03:11,485 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region c0e859b293ab81cb746413a04322b9d0 changed from -1.0 to 0.0, refreshing cache 2024-12-05T01:03:11,485 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 880916148c83f94d943041b7127cffb7 changed from -1.0 to 0.0, refreshing cache 2024-12-05T01:03:11,485 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 7c34576004741106869ac4fb0de4f831 changed from -1.0 to 0.0, refreshing cache 2024-12-05T01:03:11,488 DEBUG [master/2113c16e5528:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-12-05T01:03:11,489 INFO [master/2113c16e5528:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-12-05T01:03:11,489 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 2024-12-05T01:03:11,489 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {2113c16e5528=0} racks are {/default-rack=0} 2024-12-05T01:03:11,490 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 4 regions 2024-12-05T01:03:11,490 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 1 regions 2024-12-05T01:03:11,490 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 1 regions 2024-12-05T01:03:11,490 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T01:03:11,490 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T01:03:11,490 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T01:03:11,490 INFO [master/2113c16e5528:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T01:03:11,491 INFO [master/2113c16e5528:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T01:03:11,491 INFO [master/2113c16e5528:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T01:03:11,491 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=4, number of hosts=1, number of racks=1 2024-12-05T01:03:11,494 INFO [master/2113c16e5528:0.Chore.1 {}] balancer.StochasticLoadBalancer(376): Running balancer because cluster has sloppy server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-05T01:03:11,494 INFO [master/2113c16e5528:0.Chore.1 {}] balancer.StochasticLoadBalancer(515): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.42833607907743, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=14400 2024-12-05T01:03:11,630 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733360294895_0009_01_000007 while processing FINISH_CONTAINERS event 2024-12-05T01:03:11,812 INFO [master/2113c16e5528:0.Chore.1 {}] balancer.StochasticLoadBalancer(562): Finished computing new moving plan. Computation took 321 ms to try 14400 different iterations. Found a solution that moves 2 regions; Going from a computed imbalance of 0.42833607907743 to a new imbalance of 0.017890131416098565. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.3333333333333333, need balance); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8474799589822846, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8577153282654146, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-05T01:03:11,817 INFO [master/2113c16e5528:0.Chore.1 {}] master.HMaster(2167): Balancer plans size is 2, the balance interval is 150000 ms, and the max number regions in transition is 6 2024-12-05T01:03:11,818 INFO [master/2113c16e5528:0.Chore.1 {}] master.HMaster(2172): balance hri=1588230740, source=2113c16e5528,32927,1733360288712, destination=2113c16e5528,44435,1733360288855 2024-12-05T01:03:11,820 DEBUG [master/2113c16e5528:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-12-05T01:03:11,820 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-12-05T01:03:11,822 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=1588230740, regionState=CLOSING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:03:11,823 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,32927,1733360288712, state=CLOSING 2024-12-05T01:03:11,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T01:03:11,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T01:03:11,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T01:03:11,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T01:03:11,826 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T01:03:11,826 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T01:03:11,826 DEBUG [zk-event-processor-pool-0 
{}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T01:03:11,826 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T01:03:11,826 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-12-05T01:03:11,826 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:03:11,826 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1588230740, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:03:11,980 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] handler.UnassignRegionHandler(122): Close 1588230740 2024-12-05T01:03:11,980 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:03:11,980 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T01:03:11,981 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T01:03:11,981 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T01:03:11,981 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T01:03:11,981 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T01:03:11,981 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=76.94 KB heapSize=121.80 KB 2024-12-05T01:03:12,026 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/info/bb43f484f22741b995b98f6350de3a8b is 181, key is testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831./info:regioninfo/1733360571985/Put/seqid=0 2024-12-05T01:03:12,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742322_1498 (size=17550) 2024-12-05T01:03:12,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742322_1498 (size=17550) 2024-12-05T01:03:12,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742322_1498 (size=17550) 2024-12-05T01:03:12,057 
INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65.78 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/info/bb43f484f22741b995b98f6350de3a8b 2024-12-05T01:03:12,085 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/ns/b70cfa77f859442c804ba89807197e2d is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9./ns:/1733360546685/DeleteFamily/seqid=0 2024-12-05T01:03:12,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742323_1499 (size=7924) 2024-12-05T01:03:12,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742323_1499 (size=7924) 2024-12-05T01:03:12,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742323_1499 (size=7924) 2024-12-05T01:03:12,101 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.37 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/ns/b70cfa77f859442c804ba89807197e2d 2024-12-05T01:03:12,141 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/rep_barrier/abb049d6552740b3b6094edd43cb2f22 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9./rep_barrier:/1733360546685/DeleteFamily/seqid=0 2024-12-05T01:03:12,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742324_1500 (size=8195) 2024-12-05T01:03:12,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742324_1500 (size=8195) 2024-12-05T01:03:12,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742324_1500 (size=8195) 2024-12-05T01:03:12,243 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.49 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/rep_barrier/abb049d6552740b3b6094edd43cb2f22 2024-12-05T01:03:12,284 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/table/b2923f07b6d545e79259939b55d042ee is 127, key is 
testtb-testExportFileSystemStateWithMergeRegion-1,,1733360525366.0e7ee94856ad7fcaed02bac5c3cae8b9./table:/1733360546685/DeleteFamily/seqid=0 2024-12-05T01:03:12,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742325_1501 (size=9051) 2024-12-05T01:03:12,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742325_1501 (size=9051) 2024-12-05T01:03:12,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742325_1501 (size=9051) 2024-12-05T01:03:12,383 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/table/b2923f07b6d545e79259939b55d042ee 2024-12-05T01:03:12,401 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/info/bb43f484f22741b995b98f6350de3a8b as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/info/bb43f484f22741b995b98f6350de3a8b 2024-12-05T01:03:12,408 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/info/bb43f484f22741b995b98f6350de3a8b, entries=96, sequenceid=211, filesize=17.1 K 2024-12-05T01:03:12,409 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/ns/b70cfa77f859442c804ba89807197e2d as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/ns/b70cfa77f859442c804ba89807197e2d 2024-12-05T01:03:12,415 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/ns/b70cfa77f859442c804ba89807197e2d, entries=24, sequenceid=211, filesize=7.7 K 2024-12-05T01:03:12,416 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/rep_barrier/abb049d6552740b3b6094edd43cb2f22 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/rep_barrier/abb049d6552740b3b6094edd43cb2f22 2024-12-05T01:03:12,422 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/rep_barrier/abb049d6552740b3b6094edd43cb2f22, entries=22, sequenceid=211, filesize=8.0 K 2024-12-05T01:03:12,423 
DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/table/b2923f07b6d545e79259939b55d042ee as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/table/b2923f07b6d545e79259939b55d042ee 2024-12-05T01:03:12,428 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/table/b2923f07b6d545e79259939b55d042ee, entries=39, sequenceid=211, filesize=8.8 K 2024-12-05T01:03:12,431 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(3140): Finished flush of dataSize ~76.94 KB/78789, heapSize ~121.74 KB/124664, currentSize=0 B/0 for 1588230740 in 450ms, sequenceid=211, compaction requested=false 2024-12-05T01:03:12,461 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/recovered.edits/214.seqid, newMaxSeqId=214, maxSeqId=1 2024-12-05T01:03:12,461 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:03:12,461 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T01:03:12,462 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T01:03:12,462 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733360591980Running coprocessor pre-close hooks at 1733360591980Disabling compacts and flushes for region at 1733360591980Disabling writes for close at 1733360591981 (+1 ms)Obtaining lock to block concurrent updates at 1733360591981Preparing flush snapshotting stores in 1588230740 at 1733360591981Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=78789, getHeapSize=124664, getOffHeapSize=0, getCellsCount=595 at 1733360591981Flushing stores of hbase:meta,,1.1588230740 at 1733360591982 (+1 ms)Flushing 1588230740/info: creating writer at 1733360591982Flushing 1588230740/info: appending metadata at 1733360592026 (+44 ms)Flushing 1588230740/info: closing flushed file at 1733360592026Flushing 1588230740/ns: creating writer at 1733360592064 (+38 ms)Flushing 1588230740/ns: appending metadata at 1733360592085 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1733360592085Flushing 1588230740/rep_barrier: creating writer at 1733360592110 (+25 ms)Flushing 1588230740/rep_barrier: appending metadata at 1733360592141 (+31 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1733360592141Flushing 1588230740/table: creating writer at 1733360592255 (+114 ms)Flushing 1588230740/table: appending metadata at 1733360592284 (+29 ms)Flushing 
1588230740/table: closing flushed file at 1733360592284Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6eacb7df: reopening flushed file at 1733360592399 (+115 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52b48f: reopening flushed file at 1733360592408 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@130b619e: reopening flushed file at 1733360592416 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2cf02f01: reopening flushed file at 1733360592422 (+6 ms)Finished flush of dataSize ~76.94 KB/78789, heapSize ~121.74 KB/124664, currentSize=0 B/0 for 1588230740 in 450ms, sequenceid=211, compaction requested=false at 1733360592431 (+9 ms)Writing region close event to WAL at 1733360592450 (+19 ms)Running coprocessor post-close hooks at 1733360592461 (+11 ms)Closed at 1733360592462 (+1 ms) 2024-12-05T01:03:12,462 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionServer(3302): Adding 1588230740 move to 2113c16e5528,44435,1733360288855 record at close sequenceid=211 2024-12-05T01:03:12,465 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META, pid=225}] handler.UnassignRegionHandler(157): Closed 1588230740 2024-12-05T01:03:12,465 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=1588230740, regionState=CLOSED 2024-12-05T01:03:12,466 WARN [PEWorker-3 {}] zookeeper.MetaTableLocator(168): Tried to set null ServerName in hbase:meta; skipping -- ServerName required 2024-12-05T01:03:12,466 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=225, ppid=224, state=RUNNABLE, hasLock=true; CloseRegionProcedure 1588230740, server=2113c16e5528,32927,1733360288712 2024-12-05T01:03:12,469 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=225, resume processing ppid=224 2024-12-05T01:03:12,469 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; CloseRegionProcedure 1588230740, server=2113c16e5528,32927,1733360288712 in 640 msec 2024-12-05T01:03:12,470 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE; state=CLOSED, location=2113c16e5528,44435,1733360288855; forceNewPlan=false, retain=false 2024-12-05T01:03:12,620 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
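As a cross-check on the StochasticLoadBalancer figures logged above, both reported imbalances are consistent with a weighted average of the per-cost-function imbalances, i.e. sum(multiplier * imbalance) / sum(multiplier) over the listed cost functions (the "(not needed)" entries contribute nothing). This is an inference from the logged numbers, not a quote of the balancer code:

	initial: (500*0.5 + 5*1.0 + 5*1.0) / (500+7+25+15+35+5+5+5+5+5)
	       = 260 / 607
	       ~= 0.42833607907743          (the logged initial weighted average imbalance)

	final:   (7*0.3333333333333333 + 5*0.8474799589822846 + 5*0.8577153282654146) / 607
	       = 10.859309769571829 / 607
	       ~= 0.017890131416098565      (the logged new imbalance)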
2024-12-05T01:03:12,621 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:03:12,622 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,44435,1733360288855, state=OPENING 2024-12-05T01:03:12,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T01:03:12,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T01:03:12,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T01:03:12,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T01:03:12,624 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T01:03:12,625 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T01:03:12,625 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-12-05T01:03:12,625 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=226, ppid=224, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2113c16e5528,44435,1733360288855}] 2024-12-05T01:03:12,625 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T01:03:12,625 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T01:03:12,698 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000004/launch_container.sh] 2024-12-05T01:03:12,698 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000004/container_tokens] 2024-12-05T01:03:12,698 WARN [ContainersLauncher #0 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000004/sysfs] 2024-12-05T01:03:12,789 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T01:03:12,789 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T01:03:12,789 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-05T01:03:12,792 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2113c16e5528%2C44435%2C1733360288855.meta, suffix=.meta, logDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,44435,1733360288855, archiveDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/oldWALs, maxLogs=32 2024-12-05T01:03:12,812 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,44435,1733360288855/2113c16e5528%2C44435%2C1733360288855.meta.1733360592793.meta, exclude list is [], retry=0 2024-12-05T01:03:12,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46149,DS-8b9d97af-dfa8-4625-bfb7-06da988419bc,DISK] 2024-12-05T01:03:12,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41667,DS-8bafa01e-8f9d-46ad-945f-175cedad9809,DISK] 2024-12-05T01:03:12,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37797,DS-ffcd1894-adb0-4c96-8de9-f5d2258e4ec3,DISK] 2024-12-05T01:03:12,822 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/WALs/2113c16e5528,44435,1733360288855/2113c16e5528%2C44435%2C1733360288855.meta.1733360592793.meta 2024-12-05T01:03:12,823 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34391:34391),(127.0.0.1/127.0.0.1:37943:37943),(127.0.0.1/127.0.0.1:33543:33543)] 2024-12-05T01:03:12,823 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 
'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T01:03:12,823 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-05T01:03:12,824 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T01:03:12,824 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T01:03:12,824 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T01:03:12,824 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-05T01:03:12,824 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T01:03:12,824 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:03:12,824 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T01:03:12,824 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T01:03:12,829 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T01:03:12,830 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T01:03:12,830 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:03:12,841 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/info/bb43f484f22741b995b98f6350de3a8b 2024-12-05T01:03:12,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T01:03:12,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T01:03:12,842 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T01:03:12,842 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:03:12,848 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/ns/b70cfa77f859442c804ba89807197e2d 2024-12-05T01:03:12,848 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T01:03:12,848 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T01:03:12,850 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T01:03:12,850 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:03:12,865 DEBUG 
[StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/rep_barrier/abb049d6552740b3b6094edd43cb2f22 2024-12-05T01:03:12,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T01:03:12,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T01:03:12,866 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T01:03:12,866 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:03:12,876 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/table/b2923f07b6d545e79259939b55d042ee 2024-12-05T01:03:12,876 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T01:03:12,876 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T01:03:12,877 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740 2024-12-05T01:03:12,882 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740 2024-12-05T01:03:12,883 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T01:03:12,883 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T01:03:12,884 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.FlushLargeStoresPolicy(65): No 
hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T01:03:12,886 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T01:03:12,886 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=215; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63852620, jitterRate=-0.04852181673049927}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T01:03:12,886 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T01:03:12,887 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733360592824Writing region info on filesystem at 1733360592825 (+1 ms)Initializing all the Stores at 1733360592825Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733360592825Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733360592829 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360592829Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733360592829Cleaning up temporary data from old regions at 1733360592883 (+54 ms)Running coprocessor post-open hooks at 1733360592886 (+3 ms)Region opened successfully at 1733360592887 (+1 ms) 2024-12-05T01:03:12,888 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=226, masterSystemTime=1733360592777 2024-12-05T01:03:12,890 DEBUG [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T01:03:12,890 INFO [RS_OPEN_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_META, pid=226}] 
handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T01:03:12,891 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=215, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:03:12,892 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2113c16e5528,44435,1733360288855, state=OPEN 2024-12-05T01:03:12,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T01:03:12,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T01:03:12,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T01:03:12,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T01:03:12,900 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T01:03:12,900 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T01:03:12,900 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T01:03:12,900 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T01:03:12,900 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=226, ppid=224, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2113c16e5528,44435,1733360288855 2024-12-05T01:03:12,903 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=226, resume processing ppid=224 2024-12-05T01:03:12,903 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=224, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2113c16e5528,44435,1733360288855 in 275 msec 2024-12-05T01:03:12,905 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE in 1.0850 sec 2024-12-05T01:03:12,923 INFO [master/2113c16e5528:0.Chore.1 {}] master.HMaster(2172): balance hri=29254d26df66b48412cf870ed83cb900, source=2113c16e5528,32927,1733360288712, destination=2113c16e5528,41191,1733360288910 2024-12-05T01:03:12,924 DEBUG [master/2113c16e5528:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=29254d26df66b48412cf870ed83cb900, REOPEN/MOVE 2024-12-05T01:03:12,924 INFO 
[PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=29254d26df66b48412cf870ed83cb900, REOPEN/MOVE 2024-12-05T01:03:12,925 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=29254d26df66b48412cf870ed83cb900, regionState=CLOSING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:03:12,928 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] ipc.CallRunner(138): callId: 390 service: ClientService methodName: Mutate size: 257 connection: 172.17.0.2:55999 deadline: 1733360652926, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2113c16e5528 port=44435 startCode=1733360288855. As of locationSeqNum=211. 2024-12-05T01:03:12,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2113c16e5528 port=44435 startCode=1733360288855. As of locationSeqNum=211. 2024-12-05T01:03:12,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2113c16e5528 port=44435 startCode=1733360288855. As of locationSeqNum=211. 2024-12-05T01:03:12,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(84): Try updating region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1 with the new location region=hbase:meta,,1.1588230740, hostname=2113c16e5528,44435,1733360288855, seqNum=211 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2113c16e5528 port=44435 startCode=1733360288855. As of locationSeqNum=211. 2024-12-05T01:03:13,015 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33369 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=2113c16e5528,32927,1733360288712, table=hbase:acl, region=29254d26df66b48412cf870ed83cb900. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-05T01:03:13,041 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:03:13,043 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55953, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:03:13,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=29254d26df66b48412cf870ed83cb900, REOPEN/MOVE because future has completed 2024-12-05T01:03:13,045 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:03:13,045 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=228, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure 29254d26df66b48412cf870ed83cb900, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:03:13,198 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(122): Close 29254d26df66b48412cf870ed83cb900 2024-12-05T01:03:13,198 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:03:13,198 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1722): Closing 29254d26df66b48412cf870ed83cb900, disabling compactions & flushes 2024-12-05T01:03:13,198 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1755): Closing region hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 2024-12-05T01:03:13,198 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 2024-12-05T01:03:13,198 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. after waiting 0 ms 2024-12-05T01:03:13,198 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 
2024-12-05T01:03:13,198 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(2902): Flushing 29254d26df66b48412cf870ed83cb900 1/1 column families, dataSize=1.47 KB heapSize=3.49 KB 2024-12-05T01:03:13,243 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900/.tmp/l/a5d659d1d94f48448c5b6bcdf6d8a193 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1733360546666/DeleteFamily/seqid=0 2024-12-05T01:03:13,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742327_1503 (size=5791) 2024-12-05T01:03:13,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742327_1503 (size=5791) 2024-12-05T01:03:13,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742327_1503 (size=5791) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/cf/a44df0633134492db404d4b377f4a10b and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360572972/archive/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/cf/a44df0633134492db404d4b377f4a10b. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-05T01:03:13,660 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.47 KB at sequenceid=28 (bloomFilter=false), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900/.tmp/l/a5d659d1d94f48448c5b6bcdf6d8a193 2024-12-05T01:03:13,664 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a5d659d1d94f48448c5b6bcdf6d8a193 2024-12-05T01:03:13,665 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900/.tmp/l/a5d659d1d94f48448c5b6bcdf6d8a193 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900/l/a5d659d1d94f48448c5b6bcdf6d8a193 2024-12-05T01:03:13,669 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a5d659d1d94f48448c5b6bcdf6d8a193 2024-12-05T01:03:13,669 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900/l/a5d659d1d94f48448c5b6bcdf6d8a193, entries=13, sequenceid=28, filesize=5.7 K 2024-12-05T01:03:13,670 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(3140): Finished flush of dataSize ~1.47 KB/1504, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 29254d26df66b48412cf870ed83cb900 in 471ms, sequenceid=28, compaction requested=false 2024-12-05T01:03:13,672 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900/recovered.edits/31.seqid, newMaxSeqId=31, maxSeqId=1 2024-12-05T01:03:13,673 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] coprocessor.CoprocessorHost(310): Stop coprocessor 
org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:03:13,673 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1973): Closed hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 2024-12-05T01:03:13,673 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1676): Region close journal for 29254d26df66b48412cf870ed83cb900: Waiting for close lock at 1733360593198Running coprocessor pre-close hooks at 1733360593198Disabling compacts and flushes for region at 1733360593198Disabling writes for close at 1733360593198Obtaining lock to block concurrent updates at 1733360593198Preparing flush snapshotting stores in 29254d26df66b48412cf870ed83cb900 at 1733360593198Finished memstore snapshotting hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., syncing WAL and waiting on mvcc, flushsize=dataSize=1504, getHeapSize=3560, getOffHeapSize=0, getCellsCount=24 at 1733360593199 (+1 ms)Flushing stores of hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. at 1733360593202 (+3 ms)Flushing 29254d26df66b48412cf870ed83cb900/l: creating writer at 1733360593203 (+1 ms)Flushing 29254d26df66b48412cf870ed83cb900/l: appending metadata at 1733360593242 (+39 ms)Flushing 29254d26df66b48412cf870ed83cb900/l: closing flushed file at 1733360593243 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ff2053: reopening flushed file at 1733360593664 (+421 ms)Finished flush of dataSize ~1.47 KB/1504, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 29254d26df66b48412cf870ed83cb900 in 471ms, sequenceid=28, compaction requested=false at 1733360593670 (+6 ms)Writing region close event to WAL at 1733360593670Running coprocessor post-close hooks at 1733360593673 (+3 ms)Closed at 1733360593673 2024-12-05T01:03:13,673 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegionServer(3302): Adding 29254d26df66b48412cf870ed83cb900 move to 2113c16e5528,41191,1733360288910 record at close sequenceid=28 2024-12-05T01:03:13,675 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(157): Closed 29254d26df66b48412cf870ed83cb900 2024-12-05T01:03:13,675 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=29254d26df66b48412cf870ed83cb900, regionState=CLOSED 2024-12-05T01:03:13,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=228, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure 29254d26df66b48412cf870ed83cb900, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:03:13,679 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=228, resume processing ppid=227 2024-12-05T01:03:13,679 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=228, ppid=227, state=SUCCESS, hasLock=false; CloseRegionProcedure 29254d26df66b48412cf870ed83cb900, server=2113c16e5528,32927,1733360288712 in 632 msec 2024-12-05T01:03:13,679 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=29254d26df66b48412cf870ed83cb900, REOPEN/MOVE; state=CLOSED, location=2113c16e5528,41191,1733360288910; forceNewPlan=false, 
retain=false 2024-12-05T01:03:13,697 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_1/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000005/launch_container.sh] 2024-12-05T01:03:13,697 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_1/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000005/container_tokens] 2024-12-05T01:03:13,697 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_1/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000005/sysfs] 2024-12-05T01:03:13,830 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-05T01:03:13,830 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=29254d26df66b48412cf870ed83cb900, regionState=OPENING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:03:13,832 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=29254d26df66b48412cf870ed83cb900, REOPEN/MOVE because future has completed 2024-12-05T01:03:13,832 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=229, ppid=227, state=RUNNABLE, hasLock=false; OpenRegionProcedure 29254d26df66b48412cf870ed83cb900, server=2113c16e5528,41191,1733360288910}] 2024-12-05T01:03:13,986 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] handler.AssignRegionHandler(132): Open hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 2024-12-05T01:03:13,986 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(7752): Opening region: {ENCODED => 29254d26df66b48412cf870ed83cb900, NAME => 'hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900.', STARTKEY => '', ENDKEY => ''} 2024-12-05T01:03:13,987 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. service=AccessControlService 2024-12-05T01:03:13,987 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T01:03:13,987 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 29254d26df66b48412cf870ed83cb900 2024-12-05T01:03:13,987 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(898): Instantiated hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:03:13,987 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(7794): checking encryption for 29254d26df66b48412cf870ed83cb900 2024-12-05T01:03:13,987 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(7797): checking classloading for 29254d26df66b48412cf870ed83cb900 2024-12-05T01:03:13,988 INFO [StoreOpener-29254d26df66b48412cf870ed83cb900-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 29254d26df66b48412cf870ed83cb900 2024-12-05T01:03:13,989 INFO [StoreOpener-29254d26df66b48412cf870ed83cb900-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 29254d26df66b48412cf870ed83cb900 columnFamilyName l 2024-12-05T01:03:13,989 DEBUG [StoreOpener-29254d26df66b48412cf870ed83cb900-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:03:13,993 INFO [StoreFileOpener-29254d26df66b48412cf870ed83cb900-l-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a5d659d1d94f48448c5b6bcdf6d8a193 2024-12-05T01:03:13,993 DEBUG [StoreOpener-29254d26df66b48412cf870ed83cb900-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900/l/a5d659d1d94f48448c5b6bcdf6d8a193 2024-12-05T01:03:13,994 INFO [StoreOpener-29254d26df66b48412cf870ed83cb900-1 {}] regionserver.HStore(327): Store=29254d26df66b48412cf870ed83cb900/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:03:13,994 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(1038): replaying wal for 29254d26df66b48412cf870ed83cb900 2024-12-05T01:03:13,994 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 
{event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900 2024-12-05T01:03:13,995 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900 2024-12-05T01:03:13,995 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(1048): stopping wal replay for 29254d26df66b48412cf870ed83cb900 2024-12-05T01:03:13,995 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(1060): Cleaning up temporary data for 29254d26df66b48412cf870ed83cb900 2024-12-05T01:03:13,997 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(1093): writing seq id for 29254d26df66b48412cf870ed83cb900 2024-12-05T01:03:13,998 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(1114): Opened 29254d26df66b48412cf870ed83cb900; next sequenceid=32; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74623354, jitterRate=0.11197462677955627}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:03:13,998 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 29254d26df66b48412cf870ed83cb900 2024-12-05T01:03:14,001 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-05T01:03:14,001 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-05T01:03:14,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-05T01:03:14,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-05T01:03:14,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-05T01:03:14,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-05T01:03:14,006 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:14,008 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:14,008 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:14,008 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:14,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T01:03:14,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T01:03:14,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T01:03:14,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T01:03:14,013 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:14,013 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(1006): Region open journal for 29254d26df66b48412cf870ed83cb900: Running coprocessor pre-open hook at 1733360593987Writing region info on filesystem at 1733360593987Initializing all the Stores at 1733360593988 (+1 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733360593988Cleaning up temporary data from old regions at 1733360593995 (+7 ms)Running coprocessor post-open hooks at 1733360593998 (+3 ms)Region opened successfully at 1733360594013 (+15 ms) 2024-12-05T01:03:14,014 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating 
permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:14,014 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:14,015 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:14,015 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., pid=229, masterSystemTime=1733360593983 2024-12-05T01:03:14,017 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 2024-12-05T01:03:14,017 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] handler.AssignRegionHandler(153): Opened hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 2024-12-05T01:03:14,017 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=29254d26df66b48412cf870ed83cb900, regionState=OPEN, openSeqNum=32, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:03:14,019 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=229, ppid=227, state=RUNNABLE, hasLock=false; OpenRegionProcedure 29254d26df66b48412cf870ed83cb900, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T01:03:14,021 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=229, resume processing ppid=227 2024-12-05T01:03:14,021 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=227, state=SUCCESS, hasLock=false; OpenRegionProcedure 29254d26df66b48412cf870ed83cb900, server=2113c16e5528,41191,1733360288910 in 187 msec 2024-12-05T01:03:14,022 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=29254d26df66b48412cf870ed83cb900, REOPEN/MOVE in 1.0980 sec 2024-12-05T01:03:14,025 DEBUG [master/2113c16e5528:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-12-05T01:03:14,040 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testExportExpiredSnapshot because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-05T01:03:14,040 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportWithChecksum because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-05T01:03:14,180 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0009_000001 (auth:SIMPLE) from 
127.0.0.1:42432 2024-12-05T01:03:14,278 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 880916148c83f94d943041b7127cffb7, had cached 0 bytes from a total of 5422 2024-12-05T01:03:14,278 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 8bbf90773c039aafe370b8325f074c96, had cached 0 bytes from a total of 8188 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/cf/423add9b8a414df1b2aeee16ff1e1847 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360572972/archive/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/cf/423add9b8a414df1b2aeee16ff1e1847. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-05T01:03:15,202 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0009_000001 (auth:SIMPLE) from 127.0.0.1:42448 2024-12-05T01:03:16,651 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:03:16,964 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733360294895_0009_01_000010 while processing FINISH_CONTAINERS event 2024-12-05T01:03:17,694 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733360294895_0009_01_000011 while processing FINISH_CONTAINERS event Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/cf/a44df0633134492db404d4b377f4a10b and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360572972/archive/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/cf/a44df0633134492db404d4b377f4a10b. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-05T01:03:20,215 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0009_000001 (auth:SIMPLE) from 127.0.0.1:42456 2024-12-05T01:03:21,647 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000009/launch_container.sh] 2024-12-05T01:03:21,647 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000009/container_tokens] 2024-12-05T01:03:21,647 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000009/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/cf/423add9b8a414df1b2aeee16ff1e1847 and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/local-export-1733360572972/archive/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/cf/423add9b8a414df1b2aeee16ff1e1847. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-05T01:03:23,234 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0009_000001 (auth:SIMPLE) from 127.0.0.1:49442 2024-12-05T01:03:23,542 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000008/launch_container.sh] 2024-12-05T01:03:23,542 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000008/container_tokens] 2024-12-05T01:03:23,542 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000008/sysfs] 2024-12-05T01:03:26,496 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0009_000001 (auth:SIMPLE) from 127.0.0.1:49452 2024-12-05T01:03:26,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742321_1497 (size=30401) 2024-12-05T01:03:26,520 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742321_1497 (size=30401) 2024-12-05T01:03:26,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742321_1497 (size=30401) 2024-12-05T01:03:26,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742328_1504 (size=460) 2024-12-05T01:03:26,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742328_1504 (size=460) 2024-12-05T01:03:26,567 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733360294895_0009_01_000013 is : 143 2024-12-05T01:03:26,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742328_1504 (size=460) 2024-12-05T01:03:26,575 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_2/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000013/launch_container.sh] 2024-12-05T01:03:26,575 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_2/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000013/container_tokens] 2024-12-05T01:03:26,575 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_2/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000013/sysfs] 2024-12-05T01:03:26,632 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000012/launch_container.sh] 2024-12-05T01:03:26,632 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000012/container_tokens] 2024-12-05T01:03:26,632 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000012/sysfs] 
2024-12-05T01:03:26,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742329_1505 (size=30401) 2024-12-05T01:03:26,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742329_1505 (size=30401) 2024-12-05T01:03:26,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742329_1505 (size=30401) 2024-12-05T01:03:26,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742330_1506 (size=349827) 2024-12-05T01:03:26,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742330_1506 (size=349827) 2024-12-05T01:03:26,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742330_1506 (size=349827) 2024-12-05T01:03:27,089 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0009_000001 (auth:SIMPLE) from 127.0.0.1:49466 2024-12-05T01:03:28,451 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1733360294895_0009_m_000001 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
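[Editor's note] The stack trace above shows the export path: the test drives ExportSnapshot through ToolRunner, ExportSnapshot.doWork() submits the copy MapReduce job, and a failed map task surfaces as ExportSnapshotException. A minimal sketch of that call chain, not the test's exact code; the snapshot name is taken from the log, while the destination URI is a placeholder and the -snapshot / -copy-to argument names follow the commonly documented CLI usage (verify against your HBase version):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotDriver {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // ToolRunner drives ExportSnapshot exactly as in the stack trace above
            // (ToolRunner.run -> AbstractHBaseTool.run -> ExportSnapshot.doWork).
            int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
                "-snapshot", "snaptb0-testExportWithChecksum",         // snapshot name from the log
                "-copy-to", "hdfs://localhost:37149/export-test/out"   // placeholder destination
            });
            if (rc != 0) {
                // A non-zero return corresponds to the failed copy job reported above.
                System.err.println("Snapshot export failed, exit code " + rc);
            }
        }
    }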
2024-12-05T01:03:28,452 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360608452 2024-12-05T01:03:28,453 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37149, tgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360608452, rawTgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360608452, srcFsUri=hdfs://localhost:37149, srcDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:03:28,512 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37149, inputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:03:28,512 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360608452, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360608452/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-05T01:03:28,581 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T01:03:28,615 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360608452/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-05T01:03:28,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742331_1507 (size=156) 2024-12-05T01:03:28,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742331_1507 (size=156) 2024-12-05T01:03:28,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742331_1507 (size=156) 2024-12-05T01:03:28,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742332_1508 (size=621) 2024-12-05T01:03:28,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742332_1508 (size=621) 2024-12-05T01:03:28,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742332_1508 (size=621) 2024-12-05T01:03:28,875 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:28,875 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:28,875 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:30,550 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-1032752430172725750.jar 2024-12-05T01:03:30,550 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:30,551 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:30,653 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-11481690188315201516.jar 2024-12-05T01:03:30,653 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:30,654 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:30,654 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:30,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:30,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:30,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:30,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T01:03:30,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T01:03:30,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T01:03:30,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T01:03:30,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T01:03:30,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T01:03:30,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T01:03:30,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T01:03:30,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T01:03:30,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T01:03:30,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T01:03:30,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:03:30,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:03:30,660 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:03:30,660 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:03:30,660 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:03:30,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:03:30,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:03:30,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742333_1509 (size=24020) 2024-12-05T01:03:30,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742333_1509 (size=24020) 2024-12-05T01:03:30,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742333_1509 (size=24020) 2024-12-05T01:03:30,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742334_1510 (size=77755) 2024-12-05T01:03:30,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742334_1510 (size=77755) 2024-12-05T01:03:30,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742334_1510 (size=77755) 2024-12-05T01:03:30,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742335_1511 (size=131360) 2024-12-05T01:03:30,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742335_1511 (size=131360) 2024-12-05T01:03:30,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is 
added to blk_1073742335_1511 (size=131360) 2024-12-05T01:03:30,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742336_1512 (size=111793) 2024-12-05T01:03:30,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742336_1512 (size=111793) 2024-12-05T01:03:30,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742336_1512 (size=111793) 2024-12-05T01:03:30,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742337_1513 (size=1832290) 2024-12-05T01:03:30,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742337_1513 (size=1832290) 2024-12-05T01:03:30,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742337_1513 (size=1832290) 2024-12-05T01:03:31,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742338_1514 (size=8360282) 2024-12-05T01:03:31,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742338_1514 (size=8360282) 2024-12-05T01:03:31,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742338_1514 (size=8360282) 2024-12-05T01:03:31,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742339_1515 (size=6424744) 2024-12-05T01:03:31,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742339_1515 (size=6424744) 2024-12-05T01:03:31,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742339_1515 (size=6424744) 2024-12-05T01:03:31,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742340_1516 (size=503880) 2024-12-05T01:03:31,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742340_1516 (size=503880) 2024-12-05T01:03:31,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742340_1516 (size=503880) 2024-12-05T01:03:31,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742341_1517 (size=322274) 2024-12-05T01:03:31,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742341_1517 (size=322274) 2024-12-05T01:03:31,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742341_1517 (size=322274) 2024-12-05T01:03:31,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742342_1518 (size=20406) 2024-12-05T01:03:31,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46149 is added to blk_1073742342_1518 (size=20406) 2024-12-05T01:03:31,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742342_1518 (size=20406) 2024-12-05T01:03:31,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742343_1519 (size=45609) 2024-12-05T01:03:31,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742343_1519 (size=45609) 2024-12-05T01:03:31,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742343_1519 (size=45609) 2024-12-05T01:03:31,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742344_1520 (size=136454) 2024-12-05T01:03:31,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742344_1520 (size=136454) 2024-12-05T01:03:31,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742344_1520 (size=136454) 2024-12-05T01:03:31,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742345_1521 (size=443171) 2024-12-05T01:03:31,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742345_1521 (size=443171) 2024-12-05T01:03:31,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742345_1521 (size=443171) 2024-12-05T01:03:31,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742346_1522 (size=1597136) 2024-12-05T01:03:31,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742346_1522 (size=1597136) 2024-12-05T01:03:31,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742346_1522 (size=1597136) 2024-12-05T01:03:31,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742347_1523 (size=30873) 2024-12-05T01:03:31,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742347_1523 (size=30873) 2024-12-05T01:03:31,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742347_1523 (size=30873) 2024-12-05T01:03:31,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742348_1524 (size=29229) 2024-12-05T01:03:31,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742348_1524 (size=29229) 2024-12-05T01:03:31,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742348_1524 (size=29229) 2024-12-05T01:03:31,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46149 is added to blk_1073742349_1525 (size=903858) 2024-12-05T01:03:31,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742349_1525 (size=903858) 2024-12-05T01:03:31,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742349_1525 (size=903858) 2024-12-05T01:03:31,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742350_1526 (size=5175431) 2024-12-05T01:03:31,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742350_1526 (size=5175431) 2024-12-05T01:03:31,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742350_1526 (size=5175431) 2024-12-05T01:03:31,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742351_1527 (size=232881) 2024-12-05T01:03:31,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742351_1527 (size=232881) 2024-12-05T01:03:31,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742351_1527 (size=232881) 2024-12-05T01:03:31,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742352_1528 (size=1323991) 2024-12-05T01:03:31,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742352_1528 (size=1323991) 2024-12-05T01:03:31,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742352_1528 (size=1323991) 2024-12-05T01:03:31,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742353_1529 (size=4695811) 2024-12-05T01:03:31,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742353_1529 (size=4695811) 2024-12-05T01:03:31,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742353_1529 (size=4695811) 2024-12-05T01:03:31,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742354_1530 (size=1877034) 2024-12-05T01:03:31,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742354_1530 (size=1877034) 2024-12-05T01:03:31,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742354_1530 (size=1877034) 2024-12-05T01:03:31,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742355_1531 (size=217555) 2024-12-05T01:03:31,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742355_1531 (size=217555) 2024-12-05T01:03:31,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742355_1531 (size=217555) 2024-12-05T01:03:31,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742356_1532 (size=4188619) 2024-12-05T01:03:31,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742356_1532 (size=4188619) 2024-12-05T01:03:31,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742356_1532 (size=4188619) 2024-12-05T01:03:31,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742357_1533 (size=127628) 2024-12-05T01:03:31,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742357_1533 (size=127628) 2024-12-05T01:03:31,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742357_1533 (size=127628) 2024-12-05T01:03:31,836 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-05T01:03:31,838 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-05T01:03:31,840 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-05T01:03:31,840 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-05T01:03:31,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742358_1534 (size=441) 2024-12-05T01:03:31,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742358_1534 (size=441) 2024-12-05T01:03:31,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742358_1534 (size=441) 2024-12-05T01:03:31,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742359_1535 (size=21) 2024-12-05T01:03:31,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742359_1535 (size=21) 2024-12-05T01:03:31,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742359_1535 (size=21) 2024-12-05T01:03:31,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742360_1536 (size=304079) 2024-12-05T01:03:31,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742360_1536 (size=304079) 2024-12-05T01:03:31,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742360_1536 (size=304079) 2024-12-05T01:03:33,208 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T01:03:33,208 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T01:03:33,212 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0009_000001 (auth:SIMPLE) from 127.0.0.1:55582 2024-12-05T01:03:33,222 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_0/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000001/launch_container.sh] 2024-12-05T01:03:33,222 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_0/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000001/container_tokens] 2024-12-05T01:03:33,222 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_0/usercache/jenkins/appcache/application_1733360294895_0009/container_1733360294895_0009_01_000001/sysfs] 2024-12-05T01:03:33,623 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0010_000001 (auth:SIMPLE) from 127.0.0.1:56728 2024-12-05T01:03:36,698 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
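[Editor's note] The FsDatasetAsyncDiskServiceFixer DEBUG entry above comes from a reflection lookup of a private Hadoop field ("threadGroup") that no longer exists in newer Hadoop releases, so the lookup throws NoSuchFieldException and the fixer logs and skips (see HBASE-27595). An illustrative sketch of that pattern; the class and field names here are placeholders, not the actual HBase/Hadoop internals:

    import java.lang.reflect.Field;

    public class PrivateFieldProbe {
        static Object readPrivateField(Object target, String fieldName) {
            try {
                // Throws NoSuchFieldException when the field is absent in this library version.
                Field f = target.getClass().getDeclaredField(fieldName);
                f.setAccessible(true);
                return f.get(target);
            } catch (NoSuchFieldException | IllegalAccessException e) {
                // Mirrors the DEBUG line above: log the problem and fall back instead of failing.
                System.out.println(e.getClass().getSimpleName() + ": " + fieldName
                    + "; the field may not exist in this library version.");
                return null;
            }
        }

        public static void main(String[] args) {
            // Probing a field that does not exist demonstrates the logged failure path.
            readPrivateField(new Object(), "threadGroup");
        }
    }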
2024-12-05T01:03:36,975 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7c34576004741106869ac4fb0de4f831, had cached 0 bytes from a total of 8256 2024-12-05T01:03:36,975 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c0e859b293ab81cb746413a04322b9d0, had cached 0 bytes from a total of 5356 2024-12-05T01:03:42,788 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0010_000001 (auth:SIMPLE) from 127.0.0.1:44476 2024-12-05T01:03:43,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742361_1537 (size=349777) 2024-12-05T01:03:43,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742361_1537 (size=349777) 2024-12-05T01:03:43,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742361_1537 (size=349777) 2024-12-05T01:03:44,997 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0010_000001 (auth:SIMPLE) from 127.0.0.1:58358 2024-12-05T01:03:44,997 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0010_000001 (auth:SIMPLE) from 127.0.0.1:39642 2024-12-05T01:03:48,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742362_1538 (size=5356) 2024-12-05T01:03:48,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742362_1538 (size=5356) 2024-12-05T01:03:48,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742362_1538 (size=5356) 2024-12-05T01:03:48,410 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0010/container_1733360294895_0010_01_000003/launch_container.sh] 2024-12-05T01:03:48,410 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0010/container_1733360294895_0010_01_000003/container_tokens] 2024-12-05T01:03:48,410 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_3/usercache/jenkins/appcache/application_1733360294895_0010/container_1733360294895_0010_01_000003/sysfs] 2024-12-05T01:03:48,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742364_1540 (size=8256) 2024-12-05T01:03:48,975 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742364_1540 (size=8256) 2024-12-05T01:03:48,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742364_1540 (size=8256) 2024-12-05T01:03:49,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742363_1539 (size=22150) 2024-12-05T01:03:49,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742363_1539 (size=22150) 2024-12-05T01:03:49,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742363_1539 (size=22150) 2024-12-05T01:03:49,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742365_1541 (size=462) 2024-12-05T01:03:49,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742365_1541 (size=462) 2024-12-05T01:03:49,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742365_1541 (size=462) 2024-12-05T01:03:49,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742366_1542 (size=22150) 2024-12-05T01:03:49,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742366_1542 (size=22150) 2024-12-05T01:03:49,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742366_1542 (size=22150) 2024-12-05T01:03:49,128 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_3/usercache/jenkins/appcache/application_1733360294895_0010/container_1733360294895_0010_01_000002/launch_container.sh] 2024-12-05T01:03:49,128 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_3/usercache/jenkins/appcache/application_1733360294895_0010/container_1733360294895_0010_01_000002/container_tokens] 2024-12-05T01:03:49,128 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_3/usercache/jenkins/appcache/application_1733360294895_0010/container_1733360294895_0010_01_000002/sysfs] 2024-12-05T01:03:49,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742367_1543 (size=349777) 2024-12-05T01:03:49,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742367_1543 (size=349777) 2024-12-05T01:03:49,131 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742367_1543 (size=349777) 2024-12-05T01:03:49,146 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0010_000001 (auth:SIMPLE) from 127.0.0.1:58362 2024-12-05T01:03:51,103 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T01:03:51,104 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-05T01:03:51,110 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-12-05T01:03:51,111 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T01:03:51,111 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T01:03:51,111 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-05T01:03:51,111 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-05T01:03:51,111 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-05T01:03:51,111 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360608452/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360608452/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-05T01:03:51,112 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360608452/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-05T01:03:51,112 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360608452/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-05T01:03:51,119 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-05T01:03:51,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=230, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-05T01:03:51,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-12-05T01:03:51,123 DEBUG [PEWorker-5 {}] 
hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360631122"}]},"ts":"1733360631122"} 2024-12-05T01:03:51,125 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-05T01:03:51,125 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-12-05T01:03:51,126 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=231, ppid=230, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-05T01:03:51,127 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=c0e859b293ab81cb746413a04322b9d0, UNASSIGN}, {pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7c34576004741106869ac4fb0de4f831, UNASSIGN}] 2024-12-05T01:03:51,128 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7c34576004741106869ac4fb0de4f831, UNASSIGN 2024-12-05T01:03:51,128 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=c0e859b293ab81cb746413a04322b9d0, UNASSIGN 2024-12-05T01:03:51,129 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=7c34576004741106869ac4fb0de4f831, regionState=CLOSING, regionLocation=2113c16e5528,44435,1733360288855 2024-12-05T01:03:51,129 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=c0e859b293ab81cb746413a04322b9d0, regionState=CLOSING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:03:51,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7c34576004741106869ac4fb0de4f831, UNASSIGN because future has completed 2024-12-05T01:03:51,131 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:03:51,131 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=234, ppid=233, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7c34576004741106869ac4fb0de4f831, server=2113c16e5528,44435,1733360288855}] 2024-12-05T01:03:51,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=c0e859b293ab81cb746413a04322b9d0, UNASSIGN because future has completed 2024-12-05T01:03:51,132 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: 
evictOnSplit: true: evictOnClose: false 2024-12-05T01:03:51,132 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, ppid=232, state=RUNNABLE, hasLock=false; CloseRegionProcedure c0e859b293ab81cb746413a04322b9d0, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:03:51,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-12-05T01:03:51,284 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] handler.UnassignRegionHandler(122): Close 7c34576004741106869ac4fb0de4f831 2024-12-05T01:03:51,284 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:03:51,284 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1722): Closing 7c34576004741106869ac4fb0de4f831, disabling compactions & flushes 2024-12-05T01:03:51,284 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. 2024-12-05T01:03:51,285 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. 2024-12-05T01:03:51,285 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. after waiting 0 ms 2024-12-05T01:03:51,285 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. 2024-12-05T01:03:51,286 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(122): Close c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:03:51,286 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:03:51,286 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1722): Closing c0e859b293ab81cb746413a04322b9d0, disabling compactions & flushes 2024-12-05T01:03:51,286 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. 2024-12-05T01:03:51,286 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. 2024-12-05T01:03:51,286 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. 
after waiting 0 ms 2024-12-05T01:03:51,286 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. 2024-12-05T01:03:51,297 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:03:51,297 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:03:51,297 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831. 2024-12-05T01:03:51,297 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1676): Region close journal for 7c34576004741106869ac4fb0de4f831: Waiting for close lock at 1733360631284Running coprocessor pre-close hooks at 1733360631284Disabling compacts and flushes for region at 1733360631284Disabling writes for close at 1733360631285 (+1 ms)Writing region close event to WAL at 1733360631285Running coprocessor post-close hooks at 1733360631297 (+12 ms)Closed at 1733360631297 2024-12-05T01:03:51,300 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] handler.UnassignRegionHandler(157): Closed 7c34576004741106869ac4fb0de4f831 2024-12-05T01:03:51,300 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=7c34576004741106869ac4fb0de4f831, regionState=CLOSED 2024-12-05T01:03:51,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=234, ppid=233, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7c34576004741106869ac4fb0de4f831, server=2113c16e5528,44435,1733360288855 because future has completed 2024-12-05T01:03:51,304 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=234, resume processing ppid=233 2024-12-05T01:03:51,304 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, ppid=233, state=SUCCESS, hasLock=false; CloseRegionProcedure 7c34576004741106869ac4fb0de4f831, server=2113c16e5528,44435,1733360288855 in 171 msec 2024-12-05T01:03:51,305 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7c34576004741106869ac4fb0de4f831, UNASSIGN in 177 msec 2024-12-05T01:03:51,308 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:03:51,309 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 
2024-12-05T01:03:51,309 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0. 2024-12-05T01:03:51,309 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1676): Region close journal for c0e859b293ab81cb746413a04322b9d0: Waiting for close lock at 1733360631286Running coprocessor pre-close hooks at 1733360631286Disabling compacts and flushes for region at 1733360631286Disabling writes for close at 1733360631286Writing region close event to WAL at 1733360631293 (+7 ms)Running coprocessor post-close hooks at 1733360631309 (+16 ms)Closed at 1733360631309 2024-12-05T01:03:51,310 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(157): Closed c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:03:51,311 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=c0e859b293ab81cb746413a04322b9d0, regionState=CLOSED 2024-12-05T01:03:51,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=232, state=RUNNABLE, hasLock=false; CloseRegionProcedure c0e859b293ab81cb746413a04322b9d0, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:03:51,315 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=235, resume processing ppid=232 2024-12-05T01:03:51,315 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=232, state=SUCCESS, hasLock=false; CloseRegionProcedure c0e859b293ab81cb746413a04322b9d0, server=2113c16e5528,32927,1733360288712 in 181 msec 2024-12-05T01:03:51,317 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=232, resume processing ppid=231 2024-12-05T01:03:51,317 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=c0e859b293ab81cb746413a04322b9d0, UNASSIGN in 188 msec 2024-12-05T01:03:51,319 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=231, resume processing ppid=230 2024-12-05T01:03:51,319 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, ppid=230, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 191 msec 2024-12-05T01:03:51,320 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360631320"}]},"ts":"1733360631320"} 2024-12-05T01:03:51,321 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-05T01:03:51,321 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-05T01:03:51,323 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 203 msec 2024-12-05T01:03:51,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-12-05T01:03:51,442 INFO 
[RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-05T01:03:51,442 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-05T01:03:51,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=236, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-05T01:03:51,444 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=236, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-05T01:03:51,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-05T01:03:51,445 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=236, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-05T01:03:51,445 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32927 {}] ipc.CallRunner(138): callId: 406 service: ClientService methodName: Mutate size: 159 connection: 172.17.0.2:55999 deadline: 1733360691445, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2113c16e5528 port=41191 startCode=1733360288910. As of locationSeqNum=28. 2024-12-05T01:03:51,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2 , the old value is region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2113c16e5528 port=41191 startCode=1733360288910. As of locationSeqNum=28. 2024-12-05T01:03:51,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2113c16e5528 port=41191 startCode=1733360288910. As of locationSeqNum=28. 2024-12-05T01:03:51,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(84): Try updating region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,32927,1733360288712, seqNum=2 with the new location region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,41191,1733360288910, seqNum=28 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2113c16e5528 port=41191 startCode=1733360288910. As of locationSeqNum=28. 
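[Editor's note, illustrative only] The "Operation: DISABLE ... completed", "delete testtb-testExportWithChecksum" and DeleteTableProcedure (pid=236) entries above are the server-side trace of an ordinary disable-then-drop sequence. A minimal client-side sketch of the Admin calls that would produce such a trace follows; only the table name is taken from the log, everything else is assumed rather than copied from the test source.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class DropTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportWithChecksum"); // name taken from the log
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      if (admin.tableExists(table)) {
        if (!admin.isTableDisabled(table)) {
          admin.disableTable(table); // drives the DisableTableProcedure / region UNASSIGNs seen above
        }
        admin.deleteTable(table);    // drives a DeleteTableProcedure like pid=236 above
      }
    }
  }
}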
2024-12-05T01:03:51,457 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:03:51,457 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831 2024-12-05T01:03:51,460 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/recovered.edits] 2024-12-05T01:03:51,460 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/recovered.edits] 2024-12-05T01:03:51,464 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/cf/a44df0633134492db404d4b377f4a10b to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/cf/a44df0633134492db404d4b377f4a10b 2024-12-05T01:03:51,465 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/cf/423add9b8a414df1b2aeee16ff1e1847 to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/cf/423add9b8a414df1b2aeee16ff1e1847 2024-12-05T01:03:51,468 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831/recovered.edits/9.seqid 2024-12-05T01:03:51,468 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0/recovered.edits/9.seqid 2024-12-05T01:03:51,469 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/7c34576004741106869ac4fb0de4f831 2024-12-05T01:03:51,469 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportWithChecksum/c0e859b293ab81cb746413a04322b9d0 2024-12-05T01:03:51,469 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-05T01:03:51,471 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=236, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-05T01:03:51,479 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-05T01:03:51,482 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-05T01:03:51,484 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=236, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-05T01:03:51,484 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-12-05T01:03:51,484 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360631484"}]},"ts":"9223372036854775807"} 2024-12-05T01:03:51,484 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360631484"}]},"ts":"9223372036854775807"} 2024-12-05T01:03:51,486 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T01:03:51,486 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => c0e859b293ab81cb746413a04322b9d0, NAME => 'testtb-testExportWithChecksum,,1733360571630.c0e859b293ab81cb746413a04322b9d0.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 7c34576004741106869ac4fb0de4f831, NAME => 'testtb-testExportWithChecksum,1,1733360571630.7c34576004741106869ac4fb0de4f831.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T01:03:51,487 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 
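[Editor's note, illustrative only] The HFileArchiver entries above show that the dropped table's store files are not deleted outright; they are moved under the cluster's archive directory with the table/region/family layout preserved. The helper below merely reproduces the path mapping visible in those log lines; its name and shape are assumptions for clarity, not HBase internals.

import org.apache.hadoop.fs.Path;

// Sketch of the data/ -> archive/data/ mapping seen in the HFileArchiver lines above.
public final class ArchivePathSketch {
  public static Path toArchivePath(Path rootDir, String namespace, String table,
                                   String encodedRegion, String family, String fileName) {
    // live location:    <root>/data/<ns>/<table>/<region>/<family>/<file>
    // archive location: <root>/archive/data/<ns>/<table>/<region>/<family>/<file>
    Path p = new Path(rootDir, "archive");
    p = new Path(p, "data");
    p = new Path(p, namespace);
    p = new Path(p, table);
    p = new Path(p, encodedRegion);
    p = new Path(p, family);
    return new Path(p, fileName);
  }
}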
2024-12-05T01:03:51,487 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733360631487"}]},"ts":"9223372036854775807"} 2024-12-05T01:03:51,488 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-12-05T01:03:51,489 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=236, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-05T01:03:51,490 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 46 msec 2024-12-05T01:03:51,562 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:03:51,563 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33377, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:03:51,566 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41191 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:03:51,566 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41191 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,44435,1733360288855, seqNum=-1] 2024-12-05T01:03:51,566 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41191 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:03:51,567 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48311, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-12-05T01:03:51,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,41191,1733360288910, seqNum=32] 2024-12-05T01:03:51,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:03:51,572 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40005, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-12-05T01:03:51,574 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41191 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-05T01:03:51,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T01:03:51,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T01:03:51,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T01:03:51,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T01:03:51,584 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-05T01:03:51,584 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-05T01:03:51,584 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-05T01:03:51,584 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-05T01:03:51,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T01:03:51,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T01:03:51,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:03:51,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:03:51,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T01:03:51,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:03:51,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T01:03:51,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:03:51,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-05T01:03:51,587 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-12-05T01:03:51,587 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-05T01:03:51,588 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:51,588 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:51,588 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:51,588 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:51,594 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-12-05T01:03:51,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-05T01:03:51,596 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-12-05T01:03:51,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-05T01:03:51,620 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=824 (was 811) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1083614010_1 at /127.0.0.1:47170 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_911848752_22 at /127.0.0.1:44112 [Receiving block BP-1741140594-172.17.0.2-1733360283702:blk_1073742326_1502] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-25 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:48880 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-8084 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1166350362_22 at /127.0.0.1:46650 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_911848752_22 at /127.0.0.1:33596 [Receiving block BP-1741140594-172.17.0.2-1733360283702:blk_1073742326_1502] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 14362) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1741140594-172.17.0.2-1733360283702:blk_1073742326_1502, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_911848752_22 at /127.0.0.1:48682 [Receiving block 
BP-1741140594-172.17.0.2-1733360283702:blk_1073742326_1502] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1741140594-172.17.0.2-1733360283702:blk_1073742326_1502, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:39498 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39515 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RS_CLOSE_META-regionserver/2113c16e5528:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver 
for client DFSClient_NONMAPREDUCE_-1083614010_1 at /127.0.0.1:45978 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1741140594-172.17.0.2-1733360283702:blk_1073742326_1502, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:39515 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11-prefix:2113c16e5528,44435,1733360288855.meta java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=843 (was 818) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=767 (was 533) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 14) - ProcessCount LEAK? 
-, AvailableMemoryMB=4726 (was 5440) 2024-12-05T01:03:51,621 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=824 is superior to 500 2024-12-05T01:03:51,641 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=824, OpenFileDescriptor=843, MaxFileDescriptor=1048576, SystemLoadAverage=767, ProcessCount=20, AvailableMemoryMB=4721 2024-12-05T01:03:51,641 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=824 is superior to 500 2024-12-05T01:03:51,643 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T01:03:51,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=237, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:03:51,646 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T01:03:51,646 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:03:51,646 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 237 2024-12-05T01:03:51,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-12-05T01:03:51,647 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T01:03:51,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742368_1544 (size=418) 2024-12-05T01:03:51,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742368_1544 (size=418) 2024-12-05T01:03:51,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742368_1544 (size=418) 2024-12-05T01:03:51,659 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e2515d6c42ef02f66faaeadf8ba734e1, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' 
=> 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:03:51,661 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 67df7793993e4d091a25423aa873fa7a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:03:51,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742369_1545 (size=79) 2024-12-05T01:03:51,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742369_1545 (size=79) 2024-12-05T01:03:51,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742369_1545 (size=79) 2024-12-05T01:03:51,689 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:03:51,689 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing e2515d6c42ef02f66faaeadf8ba734e1, disabling compactions & flushes 2024-12-05T01:03:51,689 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. 2024-12-05T01:03:51,689 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. 2024-12-05T01:03:51,689 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. after waiting 0 ms 2024-12-05T01:03:51,689 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. 
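[Editor's note, illustrative only] The create request above prints the full stored table descriptor; most of the listed attributes are HBase defaults. A hedged client-side sketch of building an equivalent descriptor follows, setting only the non-default pieces explicitly; the split key '1' reproduces the two regions (STARTKEY ''..'1' and '1'..'') instantiated in the entries above. This is an assumed reconstruction, not the test's actual code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"); // from the log
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
        .setRegionReplication(1)                                  // REGION_REPLICATION => '1'
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)                                    // VERSIONS => '1'
            .setBlocksize(65536)                                  // BLOCKSIZE => 64KB
            .build())
        .build();
    byte[][] splitKeys = { Bytes.toBytes("1") };                  // yields the two regions seen above
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      admin.createTable(desc, splitKeys); // stores a CreateTableProcedure like pid=237 above
    }
  }
}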
2024-12-05T01:03:51,689 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. 2024-12-05T01:03:51,689 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for e2515d6c42ef02f66faaeadf8ba734e1: Waiting for close lock at 1733360631689Disabling compacts and flushes for region at 1733360631689Disabling writes for close at 1733360631689Writing region close event to WAL at 1733360631689Closed at 1733360631689 2024-12-05T01:03:51,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742370_1546 (size=79) 2024-12-05T01:03:51,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742370_1546 (size=79) 2024-12-05T01:03:51,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742370_1546 (size=79) 2024-12-05T01:03:51,699 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:03:51,699 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 67df7793993e4d091a25423aa873fa7a, disabling compactions & flushes 2024-12-05T01:03:51,699 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. 2024-12-05T01:03:51,699 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. 2024-12-05T01:03:51,699 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. after waiting 0 ms 2024-12-05T01:03:51,699 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. 2024-12-05T01:03:51,699 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. 
2024-12-05T01:03:51,699 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 67df7793993e4d091a25423aa873fa7a: Waiting for close lock at 1733360631699Disabling compacts and flushes for region at 1733360631699Disabling writes for close at 1733360631699Writing region close event to WAL at 1733360631699Closed at 1733360631699 2024-12-05T01:03:51,701 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T01:03:51,701 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733360631701"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360631701"}]},"ts":"1733360631701"} 2024-12-05T01:03:51,701 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733360631701"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733360631701"}]},"ts":"1733360631701"} 2024-12-05T01:03:51,705 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T01:03:51,706 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T01:03:51,706 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360631706"}]},"ts":"1733360631706"} 2024-12-05T01:03:51,708 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-05T01:03:51,708 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {2113c16e5528=0} racks are {/default-rack=0} 2024-12-05T01:03:51,710 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T01:03:51,710 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T01:03:51,710 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T01:03:51,710 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T01:03:51,710 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T01:03:51,710 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T01:03:51,710 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T01:03:51,710 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T01:03:51,710 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T01:03:51,710 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T01:03:51,710 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=238, 
ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2515d6c42ef02f66faaeadf8ba734e1, ASSIGN}, {pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=67df7793993e4d091a25423aa873fa7a, ASSIGN}] 2024-12-05T01:03:51,711 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=67df7793993e4d091a25423aa873fa7a, ASSIGN 2024-12-05T01:03:51,712 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2515d6c42ef02f66faaeadf8ba734e1, ASSIGN 2024-12-05T01:03:51,712 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=67df7793993e4d091a25423aa873fa7a, ASSIGN; state=OFFLINE, location=2113c16e5528,32927,1733360288712; forceNewPlan=false, retain=false 2024-12-05T01:03:51,713 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2515d6c42ef02f66faaeadf8ba734e1, ASSIGN; state=OFFLINE, location=2113c16e5528,41191,1733360288910; forceNewPlan=false, retain=false 2024-12-05T01:03:51,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-12-05T01:03:51,863 INFO [2113c16e5528:33369 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-05T01:03:51,864 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=238 updating hbase:meta row=e2515d6c42ef02f66faaeadf8ba734e1, regionState=OPENING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:03:51,864 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=239 updating hbase:meta row=67df7793993e4d091a25423aa873fa7a, regionState=OPENING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:03:51,866 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2515d6c42ef02f66faaeadf8ba734e1, ASSIGN because future has completed 2024-12-05T01:03:51,866 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=238, state=RUNNABLE, hasLock=false; OpenRegionProcedure e2515d6c42ef02f66faaeadf8ba734e1, server=2113c16e5528,41191,1733360288910}] 2024-12-05T01:03:51,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=67df7793993e4d091a25423aa873fa7a, ASSIGN because future has completed 2024-12-05T01:03:51,867 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=241, ppid=239, state=RUNNABLE, hasLock=false; OpenRegionProcedure 67df7793993e4d091a25423aa873fa7a, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:03:51,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-12-05T01:03:52,021 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. 2024-12-05T01:03:52,021 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. 2024-12-05T01:03:52,021 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7752): Opening region: {ENCODED => e2515d6c42ef02f66faaeadf8ba734e1, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T01:03:52,021 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7752): Opening region: {ENCODED => 67df7793993e4d091a25423aa873fa7a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T01:03:52,022 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. 
service=AccessControlService 2024-12-05T01:03:52,022 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. service=AccessControlService 2024-12-05T01:03:52,022 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T01:03:52,022 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T01:03:52,022 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 67df7793993e4d091a25423aa873fa7a 2024-12-05T01:03:52,022 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:03:52,022 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:03:52,022 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T01:03:52,022 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7794): checking encryption for 67df7793993e4d091a25423aa873fa7a 2024-12-05T01:03:52,022 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7797): checking classloading for 67df7793993e4d091a25423aa873fa7a 2024-12-05T01:03:52,022 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7794): checking encryption for e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:03:52,022 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7797): checking classloading for e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:03:52,023 INFO [StoreOpener-67df7793993e4d091a25423aa873fa7a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 67df7793993e4d091a25423aa873fa7a 2024-12-05T01:03:52,023 INFO [StoreOpener-e2515d6c42ef02f66faaeadf8ba734e1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:03:52,025 INFO [StoreOpener-67df7793993e4d091a25423aa873fa7a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 67df7793993e4d091a25423aa873fa7a columnFamilyName cf 2024-12-05T01:03:52,025 INFO [StoreOpener-e2515d6c42ef02f66faaeadf8ba734e1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2515d6c42ef02f66faaeadf8ba734e1 columnFamilyName cf 2024-12-05T01:03:52,025 DEBUG [StoreOpener-e2515d6c42ef02f66faaeadf8ba734e1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:03:52,025 DEBUG [StoreOpener-67df7793993e4d091a25423aa873fa7a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T01:03:52,025 INFO [StoreOpener-67df7793993e4d091a25423aa873fa7a-1 {}] regionserver.HStore(327): Store=67df7793993e4d091a25423aa873fa7a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:03:52,025 INFO [StoreOpener-e2515d6c42ef02f66faaeadf8ba734e1-1 {}] regionserver.HStore(327): Store=e2515d6c42ef02f66faaeadf8ba734e1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T01:03:52,025 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1038): replaying wal for 67df7793993e4d091a25423aa873fa7a 2024-12-05T01:03:52,025 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1038): replaying wal for e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:03:52,026 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a 
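The CompactionConfiguration lines above echo the compaction settings in effect for each new store: at least 3 and at most 10 files per minor compaction, a 1.2 selection ratio (5.0 off-peak), a 2.5 GiB throttle point, and weekly major compactions with 0.5 jitter. Nothing in the log suggests this test overrides them; they are the ordinary defaults driven by Configuration keys. A hedged sketch of setting them programmatically, assuming the standard property names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Values mirror what CompactionConfiguration printed above (also the HBase defaults).
    conf.setInt("hbase.hstore.compaction.min", 3);                  // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                 // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2F);           // selection ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);   // off-peak ratio
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);      // major period, 7 days in ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5F);    // major jitter
    System.out.println("compaction ratio = "
        + conf.getFloat("hbase.hstore.compaction.ratio", 0F));
  }
}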
2024-12-05T01:03:52,026 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:03:52,026 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a 2024-12-05T01:03:52,026 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:03:52,026 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1048): stopping wal replay for e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:03:52,026 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1060): Cleaning up temporary data for e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:03:52,026 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1048): stopping wal replay for 67df7793993e4d091a25423aa873fa7a 2024-12-05T01:03:52,027 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1060): Cleaning up temporary data for 67df7793993e4d091a25423aa873fa7a 2024-12-05T01:03:52,028 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1093): writing seq id for e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:03:52,028 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1093): writing seq id for 67df7793993e4d091a25423aa873fa7a 2024-12-05T01:03:52,029 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:03:52,029 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T01:03:52,030 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1114): Opened e2515d6c42ef02f66faaeadf8ba734e1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61677042, jitterRate=-0.08094045519828796}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:03:52,030 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 
{event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:03:52,030 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1114): Opened 67df7793993e4d091a25423aa873fa7a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60700302, jitterRate=-0.09549501538276672}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T01:03:52,030 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 67df7793993e4d091a25423aa873fa7a 2024-12-05T01:03:52,030 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1006): Region open journal for 67df7793993e4d091a25423aa873fa7a: Running coprocessor pre-open hook at 1733360632022Writing region info on filesystem at 1733360632022Initializing all the Stores at 1733360632023 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360632023Cleaning up temporary data from old regions at 1733360632027 (+4 ms)Running coprocessor post-open hooks at 1733360632030 (+3 ms)Region opened successfully at 1733360632030 2024-12-05T01:03:52,030 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1006): Region open journal for e2515d6c42ef02f66faaeadf8ba734e1: Running coprocessor pre-open hook at 1733360632022Writing region info on filesystem at 1733360632022Initializing all the Stores at 1733360632023 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733360632023Cleaning up temporary data from old regions at 1733360632026 (+3 ms)Running coprocessor post-open hooks at 1733360632030 (+4 ms)Region opened successfully at 1733360632030 2024-12-05T01:03:52,031 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a., pid=241, masterSystemTime=1733360632019 2024-12-05T01:03:52,031 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1., pid=240, masterSystemTime=1733360632018 2024-12-05T01:03:52,033 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. 
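At this point both regions are open (next sequenceid=2) and the region servers report their post-open deploy tasks back to the master, which records the OPEN state and final locations in hbase:meta. A small sketch, assuming the same table name, of how a client could list where each region landed via the RegionLocator API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionLocations {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(name)) {
      // Each location pairs a RegionInfo (encoded name, start/end key) with a ServerName,
      // e.g. e2515d6c... -> 2113c16e5528,41191,... and 67df7793... -> 2113c16e5528,32927,...
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}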
2024-12-05T01:03:52,033 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. 2024-12-05T01:03:52,033 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=239 updating hbase:meta row=67df7793993e4d091a25423aa873fa7a, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:03:52,033 DEBUG [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. 2024-12-05T01:03:52,034 INFO [RS_OPEN_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. 2024-12-05T01:03:52,034 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=238 updating hbase:meta row=e2515d6c42ef02f66faaeadf8ba734e1, regionState=OPEN, openSeqNum=2, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:03:52,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=241, ppid=239, state=RUNNABLE, hasLock=false; OpenRegionProcedure 67df7793993e4d091a25423aa873fa7a, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:03:52,036 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=240, ppid=238, state=RUNNABLE, hasLock=false; OpenRegionProcedure e2515d6c42ef02f66faaeadf8ba734e1, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T01:03:52,038 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=241, resume processing ppid=239 2024-12-05T01:03:52,038 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=239, state=SUCCESS, hasLock=false; OpenRegionProcedure 67df7793993e4d091a25423aa873fa7a, server=2113c16e5528,32927,1733360288712 in 169 msec 2024-12-05T01:03:52,039 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, ppid=237, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=67df7793993e4d091a25423aa873fa7a, ASSIGN in 328 msec 2024-12-05T01:03:52,039 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=240, resume processing ppid=238 2024-12-05T01:03:52,039 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=238, state=SUCCESS, hasLock=false; OpenRegionProcedure e2515d6c42ef02f66faaeadf8ba734e1, server=2113c16e5528,41191,1733360288910 in 171 msec 2024-12-05T01:03:52,041 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=238, resume processing ppid=237 2024-12-05T01:03:52,041 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=237, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2515d6c42ef02f66faaeadf8ba734e1, ASSIGN in 329 msec 2024-12-05T01:03:52,042 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 
execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T01:03:52,042 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360632042"}]},"ts":"1733360632042"} 2024-12-05T01:03:52,043 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-05T01:03:52,044 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T01:03:52,044 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-05T01:03:52,047 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41191 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-05T01:03:52,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:03:52,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:03:52,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:03:52,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:03:52,052 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:52,052 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:52,052 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:52,052 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:52,053 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 408 msec 
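CREATE_TABLE_POST_OPERATION writes the owner's ACL entry (jenkins: RWXCA) into the hbase:acl table, and the ZKPermissionWatcher records that follow show every server refreshing its permission cache from ZooKeeper. A hedged sketch of issuing an equivalent grant through the public AccessControlClient API, assuming the AccessController coprocessor is installed as it is in this test cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissions {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // RWXCA = READ, WRITE, EXEC, CREATE, ADMIN; null family/qualifier covers the whole table.
      AccessControlClient.grant(conn, name, "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}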
2024-12-05T01:03:52,053 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:52,053 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:52,054 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:52,054 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-05T01:03:52,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-12-05T01:03:52,271 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-05T01:03:52,272 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-12-05T01:03:52,272 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:03:52,273 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32927 {}] ipc.CallRunner(138): callId: 528 service: ClientService methodName: Scan size: 113 connection: 172.17.0.2:44536 deadline: 1733360692272, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2113c16e5528 port=44435 startCode=1733360288855. As of locationSeqNum=211. 2024-12-05T01:03:52,273 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2113c16e5528 port=44435 startCode=1733360288855. As of locationSeqNum=211. 2024-12-05T01:03:52,273 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2113c16e5528 port=44435 startCode=1733360288855. As of locationSeqNum=211. 
2024-12-05T01:03:52,273 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(84): Try updating region=hbase:meta,,1.1588230740, hostname=2113c16e5528,32927,1733360288712, seqNum=-1 with the new location region=hbase:meta,,1.1588230740, hostname=2113c16e5528,44435,1733360288855, seqNum=211 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2113c16e5528 port=44435 startCode=1733360288855. As of locationSeqNum=211. 2024-12-05T01:03:52,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-12-05T01:03:52,387 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:03:52,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-12-05T01:03:52,387 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-05T01:03:52,390 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-05T01:03:52,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360632390 (current time:1733360632390). 2024-12-05T01:03:52,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:03:52,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-05T01:03:52,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:03:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a3edb4c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:03:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:03:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:03:52,392 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:03:52,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:03:52,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection 
registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:03:52,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60a4058e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:03:52,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:03:52,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:03:52,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:03:52,393 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53164, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:03:52,394 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e251275, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:03:52,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:03:52,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,44435,1733360288855, seqNum=-1] 2024-12-05T01:03:52,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:03:52,396 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55952, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:03:52,398 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 
2024-12-05T01:03:52,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:03:52,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:03:52,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:03:52,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c82e4d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:03:52,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:03:52,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:03:52,400 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:03:52,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:03:52,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:03:52,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f783d0b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:03:52,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:03:52,400 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:03:52,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:03:52,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:03:52,401 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53188, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:03:52,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16227b74, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:03:52,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:03:52,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,44435,1733360288855, seqNum=-1] 2024-12-05T01:03:52,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:03:52,404 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55954, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:03:52,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,41191,1733360288910, seqNum=32] 2024-12-05T01:03:52,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:03:52,408 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51000, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:03:52,409 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 
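To validate the snapshot request, the master locates the hbase:acl region and reads the table's permission entry back (the "Read acl: entry[...], kv [jenkins: RWXCA]" records) so the ACL can be embedded in the snapshot description. A sketch of the equivalent client-side lookup, assuming AccessControlClient.getUserPermissions with a table-name regex:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ShowTableAcls {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // The argument is a table-name regex; here it matches the test table exactly.
      List<UserPermission> perms = AccessControlClient.getUserPermissions(
          conn, "testtb-testExportFileSystemStateWithSkipTmp");
      // Expect a single entry equivalent to "jenkins: RWXCA", as seen in the log.
      perms.forEach(p -> System.out.println(p));
    }
  }
}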
2024-12-05T01:03:52,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:03:52,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:03:52,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:03:52,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-05T01:03:52,409 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:03:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
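With the permission check done and no snapshot of that name already present, the master stores SnapshotProcedure pid=242 for the FLUSH-type snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp. On the client side this whole sequence is a single Admin call; a minimal sketch, assuming the standard synchronous snapshot API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeEmptySnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure completes; for an enabled
      // table this defaults to a flush-based (FLUSH type) snapshot.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp",
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
    }
  }
}

The repeated "Checking to see if procedure is done pid=242" records below correspond to the client waiting for that procedure to finish.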
2024-12-05T01:03:52,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-05T01:03:52,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-05T01:03:52,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-05T01:03:52,413 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:03:52,414 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:03:52,417 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:03:52,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742371_1547 (size=203) 2024-12-05T01:03:52,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742371_1547 (size=203) 2024-12-05T01:03:52,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742371_1547 (size=203) 2024-12-05T01:03:52,433 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:03:52,433 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2515d6c42ef02f66faaeadf8ba734e1}, {pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 67df7793993e4d091a25423aa873fa7a}] 2024-12-05T01:03:52,434 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:03:52,435 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 67df7793993e4d091a25423aa873fa7a 2024-12-05T01:03:52,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-05T01:03:52,586 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41191 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=243 2024-12-05T01:03:52,586 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. 2024-12-05T01:03:52,586 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2603): Flush status journal for e2515d6c42ef02f66faaeadf8ba734e1: 2024-12-05T01:03:52,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-05T01:03:52,587 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32927 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=244 2024-12-05T01:03:52,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:03:52,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:03:52,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:03:52,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. 2024-12-05T01:03:52,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2603): Flush status journal for 67df7793993e4d091a25423aa873fa7a: 2024-12-05T01:03:52,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-05T01:03:52,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:03:52,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:03:52,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T01:03:52,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742372_1548 (size=82) 2024-12-05T01:03:52,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742372_1548 (size=82) 2024-12-05T01:03:52,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742372_1548 (size=82) 2024-12-05T01:03:52,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742373_1549 (size=82) 2024-12-05T01:03:52,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742373_1549 (size=82) 2024-12-05T01:03:52,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742373_1549 (size=82) 2024-12-05T01:03:52,610 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. 2024-12-05T01:03:52,610 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=244 2024-12-05T01:03:52,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=244 2024-12-05T01:03:52,610 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 67df7793993e4d091a25423aa873fa7a 2024-12-05T01:03:52,611 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 67df7793993e4d091a25423aa873fa7a 2024-12-05T01:03:52,612 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 67df7793993e4d091a25423aa873fa7a in 178 msec 2024-12-05T01:03:52,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-05T01:03:52,996 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. 
2024-12-05T01:03:52,996 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=243 2024-12-05T01:03:52,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=243 2024-12-05T01:03:52,997 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:03:52,997 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:03:52,999 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=243, resume processing ppid=242 2024-12-05T01:03:52,999 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e2515d6c42ef02f66faaeadf8ba734e1 in 565 msec 2024-12-05T01:03:52,999 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:03:53,000 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:03:53,001 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:03:53,001 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:03:53,001 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:03:53,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742374_1550 (size=585) 2024-12-05T01:03:53,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742374_1550 (size=585) 2024-12-05T01:03:53,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742374_1550 (size=585) 2024-12-05T01:03:53,018 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:03:53,022 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:03:53,022 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:03:53,024 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:03:53,024 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-05T01:03:53,025 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 614 msec 2024-12-05T01:03:53,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-05T01:03:53,042 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-05T01:03:53,046 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='0aef0434e39b061c605e3456c58737487', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1., hostname=2113c16e5528,41191,1733360288910, seqNum=2] 2024-12-05T01:03:53,047 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='1185c6345785cb28812b0d7098d6223cd', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:03:53,048 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='2fcdc2bb7fb5619089eea972dd27e49fe', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 
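The client-side call that produces a snapshot trail like the one above is the HBase Admin snapshot API. A minimal Java sketch follows; it is illustrative only (the table and snapshot names are taken from this log, the configuration and class name are assumptions), not the test code that generated these entries:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml on the classpath pointing at the target cluster.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
      // Synchronous FLUSH-type snapshot of an online table: the master registers a
      // SnapshotProcedure plus one SnapshotRegionProcedure per region, as logged above.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp", table);
    }
  }
}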
2024-12-05T01:03:53,050 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='34fa7b20674f30e29561e0c9bb0f5d273', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:03:53,050 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='42f8d055b5518b0b5950732315940ad1b', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:03:53,051 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='5d559103ec2d26e21670c09451f29eadc', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:03:53,052 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='6d0c0fa4e9dc50a900090526700503b4e', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a., hostname=2113c16e5528,32927,1733360288712, seqNum=2] 2024-12-05T01:03:53,053 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41191 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:03:53,054 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32927 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T01:03:53,055 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-05T01:03:53,058 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:03:53,058 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. 
2024-12-05T01:03:53,058 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T01:03:53,060 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-05T01:03:53,065 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-05T01:03:53,070 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-05T01:03:53,072 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-05T01:03:53,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733360633072 (current time:1733360633072). 2024-12-05T01:03:53,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T01:03:53,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-05T01:03:53,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T01:03:53,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33d7d2fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:03:53,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:03:53,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:03:53,074 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:03:53,074 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:03:53,074 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:03:53,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d1d497f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:03:53,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:03:53,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:03:53,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:03:53,076 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53212, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:03:53,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@329d81d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:03:53,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:03:53,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,44435,1733360288855, seqNum=-1] 2024-12-05T01:03:53,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:03:53,078 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55956, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:03:53,079 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 
2024-12-05T01:03:53,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:03:53,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:03:53,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:03:53,080 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:03:53,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cf1d0d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:03:53,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ClusterIdFetcher(90): Going to request 2113c16e5528,33369,-1 for getting cluster id 2024-12-05T01:03:53,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T01:03:53,081 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e24f6832-226f-46b1-8a64-09e1d59fa392' 2024-12-05T01:03:53,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T01:03:53,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e24f6832-226f-46b1-8a64-09e1d59fa392" 2024-12-05T01:03:53,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@706b14d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:03:53,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [2113c16e5528,33369,-1] 2024-12-05T01:03:53,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T01:03:53,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:03:53,082 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53216, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T01:03:53,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b61b358, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T01:03:53,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T01:03:53,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2113c16e5528,44435,1733360288855, seqNum=-1] 2024-12-05T01:03:53,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:03:53,085 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55958, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:03:53,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., hostname=2113c16e5528,41191,1733360288910, seqNum=32] 2024-12-05T01:03:53,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T01:03:53,088 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51010, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T01:03:53,089 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369. 
2024-12-05T01:03:53,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T01:03:53,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:03:53,089 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:03:53,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:03:53,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-05T01:03:53,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
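The repeated "Checking to see if procedure is done pid=..." entries above and below are the client polling the master for snapshot completion. A hedged sketch of doing that polling explicitly with the asynchronous snapshot API (the SnapshotDescription constructor used here and the 200 ms interval are assumptions for illustration):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class PollSnapshotSketch {
  // Assumes an already-open Admin handle, e.g. from the previous sketch.
  static void snapshotAndWait(Admin admin) throws Exception {
    SnapshotDescription desc = new SnapshotDescription(
        "snaptb0-testExportFileSystemStateWithSkipTmp",
        TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
    admin.snapshotAsync(desc);                 // master stores a SnapshotProcedure (pid as logged)
    while (!admin.isSnapshotFinished(desc)) {  // mirrors the MasterRpcServices(1377) checks
      Thread.sleep(200);                       // polling interval chosen arbitrarily here
    }
  }
}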
2024-12-05T01:03:53,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=245, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-05T01:03:53,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 245 2024-12-05T01:03:53,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-05T01:03:53,092 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T01:03:53,092 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T01:03:53,094 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T01:03:53,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742375_1551 (size=198) 2024-12-05T01:03:53,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742375_1551 (size=198) 2024-12-05T01:03:53,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742375_1551 (size=198) 2024-12-05T01:03:53,103 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T01:03:53,103 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2515d6c42ef02f66faaeadf8ba734e1}, {pid=247, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 67df7793993e4d091a25423aa873fa7a}] 2024-12-05T01:03:53,104 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 67df7793993e4d091a25423aa873fa7a 2024-12-05T01:03:53,104 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=246, ppid=245, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:03:53,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-05T01:03:53,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41191 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=246 2024-12-05T01:03:53,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32927 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=247 2024-12-05T01:03:53,258 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. 2024-12-05T01:03:53,258 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. 2024-12-05T01:03:53,258 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(2902): Flushing e2515d6c42ef02f66faaeadf8ba734e1 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-05T01:03:53,258 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(2902): Flushing 67df7793993e4d091a25423aa873fa7a 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-05T01:03:53,281 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1/.tmp/cf/5f2ed8c3cdb14e75b3a85bc6c0e503bb is 71, key is 064fc071f74c97d362f37d5d0770c392/cf:q/1733360633052/Put/seqid=0 2024-12-05T01:03:53,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a/.tmp/cf/400df223d79c40408372a6fb3f7465d1 is 71, key is 108e4000df1fd4d993577c3bddd5a9b5/cf:q/1733360633054/Put/seqid=0 2024-12-05T01:03:53,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742376_1552 (size=5288) 2024-12-05T01:03:53,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742376_1552 (size=5288) 2024-12-05T01:03:53,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742376_1552 (size=5288) 2024-12-05T01:03:53,303 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1/.tmp/cf/5f2ed8c3cdb14e75b3a85bc6c0e503bb 2024-12-05T01:03:53,309 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1/.tmp/cf/5f2ed8c3cdb14e75b3a85bc6c0e503bb as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1/cf/5f2ed8c3cdb14e75b3a85bc6c0e503bb 2024-12-05T01:03:53,315 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1/cf/5f2ed8c3cdb14e75b3a85bc6c0e503bb, entries=3, sequenceid=6, filesize=5.2 K 2024-12-05T01:03:53,316 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for e2515d6c42ef02f66faaeadf8ba734e1 in 58ms, sequenceid=6, compaction requested=false 2024-12-05T01:03:53,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-05T01:03:53,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(2603): Flush status journal for e2515d6c42ef02f66faaeadf8ba734e1: 2024-12-05T01:03:53,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-05T01:03:53,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:03:53,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:03:53,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1/cf/5f2ed8c3cdb14e75b3a85bc6c0e503bb] hfiles 2024-12-05T01:03:53,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1/cf/5f2ed8c3cdb14e75b3a85bc6c0e503bb for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:03:53,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742377_1553 (size=8324) 2024-12-05T01:03:53,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742377_1553 (size=8324) 2024-12-05T01:03:53,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742377_1553 (size=8324) 2024-12-05T01:03:53,326 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a/.tmp/cf/400df223d79c40408372a6fb3f7465d1 2024-12-05T01:03:53,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742378_1554 (size=121) 2024-12-05T01:03:53,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742378_1554 (size=121) 2024-12-05T01:03:53,331 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. 
2024-12-05T01:03:53,331 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=246 2024-12-05T01:03:53,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742378_1554 (size=121) 2024-12-05T01:03:53,332 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a/.tmp/cf/400df223d79c40408372a6fb3f7465d1 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a/cf/400df223d79c40408372a6fb3f7465d1 2024-12-05T01:03:53,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=246 2024-12-05T01:03:53,332 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:03:53,332 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=246, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:03:53,335 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e2515d6c42ef02f66faaeadf8ba734e1 in 230 msec 2024-12-05T01:03:53,337 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a/cf/400df223d79c40408372a6fb3f7465d1, entries=47, sequenceid=6, filesize=8.1 K 2024-12-05T01:03:53,337 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 67df7793993e4d091a25423aa873fa7a in 79ms, sequenceid=6, compaction requested=false 2024-12-05T01:03:53,337 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(2603): Flush status journal for 67df7793993e4d091a25423aa873fa7a: 2024-12-05T01:03:53,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-05T01:03:53,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:03:53,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T01:03:53,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a/cf/400df223d79c40408372a6fb3f7465d1] hfiles 2024-12-05T01:03:53,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a/cf/400df223d79c40408372a6fb3f7465d1 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:03:53,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742379_1555 (size=121) 2024-12-05T01:03:53,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742379_1555 (size=121) 2024-12-05T01:03:53,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742379_1555 (size=121) 2024-12-05T01:03:53,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. 
2024-12-05T01:03:53,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2113c16e5528:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=247 2024-12-05T01:03:53,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster(4169): Remote procedure done, pid=247 2024-12-05T01:03:53,347 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 67df7793993e4d091a25423aa873fa7a 2024-12-05T01:03:53,347 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 67df7793993e4d091a25423aa873fa7a 2024-12-05T01:03:53,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=247, resume processing ppid=245 2024-12-05T01:03:53,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=245, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 67df7793993e4d091a25423aa873fa7a in 245 msec 2024-12-05T01:03:53,349 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T01:03:53,350 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T01:03:53,350 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T01:03:53,350 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:03:53,351 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:03:53,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742380_1556 (size=663) 2024-12-05T01:03:53,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742380_1556 (size=663) 2024-12-05T01:03:53,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742380_1556 (size=663) 2024-12-05T01:03:53,364 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp 
table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T01:03:53,368 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T01:03:53,369 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:03:53,370 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T01:03:53,370 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 245 2024-12-05T01:03:53,371 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 280 msec 2024-12-05T01:03:53,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-05T01:03:53,412 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-05T01:03:53,412 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360633412 2024-12-05T01:03:53,412 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37149, tgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360633412, rawTgtDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360633412, srcFsUri=hdfs://localhost:37149, srcDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:03:53,441 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37149, inputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11 2024-12-05T01:03:53,441 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360633412, skipTmp=true, 
initialOutputSnapshotDir=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360633412/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:03:53,443 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T01:03:53,446 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360633412/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:03:53,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742381_1557 (size=663) 2024-12-05T01:03:53,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742381_1557 (size=663) 2024-12-05T01:03:53,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742381_1557 (size=663) 2024-12-05T01:03:53,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742382_1558 (size=198) 2024-12-05T01:03:53,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742382_1558 (size=198) 2024-12-05T01:03:53,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742382_1558 (size=198) 2024-12-05T01:03:53,462 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:53,462 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:53,463 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:54,447 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-11976741605105224931.jar 2024-12-05T01:03:54,448 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:54,448 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:54,514 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop-7203588138453801150.jar 2024-12-05T01:03:54,514 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:54,515 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:54,515 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:54,515 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:54,515 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:54,515 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-05T01:03:54,516 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T01:03:54,516 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T01:03:54,516 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T01:03:54,516 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T01:03:54,516 DEBUG [Time-limited test 
{}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T01:03:54,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T01:03:54,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T01:03:54,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T01:03:54,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T01:03:54,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T01:03:54,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T01:03:54,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:03:54,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:03:54,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:03:54,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:03:54,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T01:03:54,519 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:03:54,519 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T01:03:54,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742383_1559 (size=24020) 2024-12-05T01:03:54,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742383_1559 (size=24020) 2024-12-05T01:03:54,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742383_1559 (size=24020) 2024-12-05T01:03:54,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742384_1560 (size=77755) 2024-12-05T01:03:54,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742384_1560 (size=77755) 2024-12-05T01:03:54,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742384_1560 (size=77755) 2024-12-05T01:03:54,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742385_1561 (size=131360) 2024-12-05T01:03:54,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742385_1561 (size=131360) 2024-12-05T01:03:54,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742385_1561 (size=131360) 2024-12-05T01:03:54,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742386_1562 (size=111793) 2024-12-05T01:03:54,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742386_1562 (size=111793) 2024-12-05T01:03:54,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742386_1562 (size=111793) 2024-12-05T01:03:54,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742387_1563 (size=1832290) 2024-12-05T01:03:54,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742387_1563 (size=1832290) 2024-12-05T01:03:54,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742387_1563 (size=1832290) 2024-12-05T01:03:54,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742388_1564 (size=6424744) 2024-12-05T01:03:54,625 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742388_1564 (size=6424744) 2024-12-05T01:03:54,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742388_1564 (size=6424744) 2024-12-05T01:03:54,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742389_1565 (size=8360282) 2024-12-05T01:03:54,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742389_1565 (size=8360282) 2024-12-05T01:03:54,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742389_1565 (size=8360282) 2024-12-05T01:03:54,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742390_1566 (size=443171) 2024-12-05T01:03:54,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742390_1566 (size=443171) 2024-12-05T01:03:54,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742390_1566 (size=443171) 2024-12-05T01:03:54,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742391_1567 (size=503880) 2024-12-05T01:03:54,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742391_1567 (size=503880) 2024-12-05T01:03:54,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742391_1567 (size=503880) 2024-12-05T01:03:54,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742392_1568 (size=322274) 2024-12-05T01:03:54,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742392_1568 (size=322274) 2024-12-05T01:03:54,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742392_1568 (size=322274) 2024-12-05T01:03:54,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742393_1569 (size=20406) 2024-12-05T01:03:54,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742393_1569 (size=20406) 2024-12-05T01:03:54,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742393_1569 (size=20406) 2024-12-05T01:03:54,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742394_1570 (size=45609) 2024-12-05T01:03:54,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742394_1570 (size=45609) 2024-12-05T01:03:54,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742394_1570 (size=45609) 2024-12-05T01:03:54,690 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742395_1571 (size=136454) 2024-12-05T01:03:54,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742395_1571 (size=136454) 2024-12-05T01:03:54,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742395_1571 (size=136454) 2024-12-05T01:03:54,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742396_1572 (size=1597136) 2024-12-05T01:03:54,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742396_1572 (size=1597136) 2024-12-05T01:03:54,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742396_1572 (size=1597136) 2024-12-05T01:03:54,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742397_1573 (size=30873) 2024-12-05T01:03:54,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742397_1573 (size=30873) 2024-12-05T01:03:54,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742397_1573 (size=30873) 2024-12-05T01:03:54,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742398_1574 (size=29229) 2024-12-05T01:03:54,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742398_1574 (size=29229) 2024-12-05T01:03:54,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742398_1574 (size=29229) 2024-12-05T01:03:54,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742399_1575 (size=903858) 2024-12-05T01:03:54,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742399_1575 (size=903858) 2024-12-05T01:03:54,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742399_1575 (size=903858) 2024-12-05T01:03:54,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742400_1576 (size=5175431) 2024-12-05T01:03:54,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742400_1576 (size=5175431) 2024-12-05T01:03:54,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742400_1576 (size=5175431) 2024-12-05T01:03:54,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742401_1577 (size=232881) 2024-12-05T01:03:54,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742401_1577 (size=232881) 2024-12-05T01:03:54,749 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742401_1577 (size=232881) 2024-12-05T01:03:54,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742402_1578 (size=1323991) 2024-12-05T01:03:54,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742402_1578 (size=1323991) 2024-12-05T01:03:54,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742402_1578 (size=1323991) 2024-12-05T01:03:54,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742403_1579 (size=4695811) 2024-12-05T01:03:54,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742403_1579 (size=4695811) 2024-12-05T01:03:54,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742403_1579 (size=4695811) 2024-12-05T01:03:54,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742404_1580 (size=1877034) 2024-12-05T01:03:54,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742404_1580 (size=1877034) 2024-12-05T01:03:54,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742404_1580 (size=1877034) 2024-12-05T01:03:54,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742405_1581 (size=217555) 2024-12-05T01:03:54,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742405_1581 (size=217555) 2024-12-05T01:03:54,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742405_1581 (size=217555) 2024-12-05T01:03:54,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742406_1582 (size=4188619) 2024-12-05T01:03:54,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742406_1582 (size=4188619) 2024-12-05T01:03:54,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742406_1582 (size=4188619) 2024-12-05T01:03:54,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742407_1583 (size=127628) 2024-12-05T01:03:54,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742407_1583 (size=127628) 2024-12-05T01:03:54,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742407_1583 (size=127628) 2024-12-05T01:03:54,814 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
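The long run of mapreduce.TableMapReduceUtil(972) records above shows dependency-jar resolution for the export job, and the final JobResourceUploader warning appears because no job jar was set. For reference only (not part of this test run), a minimal driver would normally avoid that warning and trigger the same jar resolution roughly as in the sketch below; the job name is a placeholder.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class ExportJobSetupSketch {
  public static Job newJob(Configuration conf) throws Exception {
    Job job = Job.getInstance(HBaseConfiguration.create(conf), "snapshot-export-sketch");
    // Setting the jar from a driver class avoids the
    // "No job jar file set. User classes may not be found." warning seen above.
    job.setJarByClass(ExportJobSetupSketch.class);
    // Ships HBase and transitive jars with the job; this is the kind of call
    // behind the "For class ..., using jar ..." DEBUG lines recorded above.
    TableMapReduceUtil.addDependencyJars(job);
    return job;
  }
}
```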
2024-12-05T01:03:54,816 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-05T01:03:54,817 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-05T01:03:54,817 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-05T01:03:54,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742408_1584 (size=469) 2024-12-05T01:03:54,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742408_1584 (size=469) 2024-12-05T01:03:54,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742408_1584 (size=469) 2024-12-05T01:03:54,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742409_1585 (size=21) 2024-12-05T01:03:54,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742409_1585 (size=21) 2024-12-05T01:03:54,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742409_1585 (size=21) 2024-12-05T01:03:54,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742410_1586 (size=304253) 2024-12-05T01:03:54,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742410_1586 (size=304253) 2024-12-05T01:03:54,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742410_1586 (size=304253) 2024-12-05T01:03:55,217 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T01:03:55,217 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T01:03:55,220 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0010_000001 (auth:SIMPLE) from 127.0.0.1:57000 2024-12-05T01:03:55,232 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_2/usercache/jenkins/appcache/application_1733360294895_0010/container_1733360294895_0010_01_000001/launch_container.sh] 2024-12-05T01:03:55,232 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_2/usercache/jenkins/appcache/application_1733360294895_0010/container_1733360294895_0010_01_000001/container_tokens] 2024-12-05T01:03:55,232 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_2/usercache/jenkins/appcache/application_1733360294895_0010/container_1733360294895_0010_01_000001/sysfs] 2024-12-05T01:03:55,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2529): Skipping flush on hbase:meta,,1.1588230740 because closing 2024-12-05T01:03:56,119 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0011_000001 (auth:SIMPLE) from 127.0.0.1:40590 2024-12-05T01:03:56,846 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:03:57,824 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 42720 2024-12-05T01:03:58,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:03:58,304 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-05T01:03:58,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-05T01:03:58,987 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 29254d26df66b48412cf870ed83cb900, had cached 0 bytes from a total of 5791 2024-12-05T01:03:59,278 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 880916148c83f94d943041b7127cffb7, had cached 0 bytes from a total of 5422 2024-12-05T01:03:59,278 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 8bbf90773c039aafe370b8325f074c96, had cached 0 bytes from a total of 8188 
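The ExportSnapshot(663)/(763) records a few lines back show the tool loading the snapshot's hfile list and computing two export splits (8.1 K and 5.2 K) before the MapReduce application launches below. As a rough orientation only, the tool is normally driven as in the following sketch; the destination URI is a placeholder, and both the ToolRunner wiring and the "snapshot.export.skiptmp" property name are assumptions based on common usage, not something the log itself confirms.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumption: "snapshot.export.skiptmp" is the property behind the
    // "SkipTmp" variant of this test; verify against your HBase version.
    conf.setBoolean("snapshot.export.skiptmp", true);
    int rc = ToolRunner.run(conf, new ExportSnapshot(),
        new String[] {
            "--snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
            // Placeholder destination; the log shows an export-test directory on the same HDFS.
            "--copy-to", "hdfs://localhost:37149/user/jenkins/export-target"
        });
    System.exit(rc);
  }
}
```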
2024-12-05T01:04:03,807 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:04:05,155 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0011_000001 (auth:SIMPLE) from 127.0.0.1:46794 2024-12-05T01:04:05,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742411_1587 (size=349975) 2024-12-05T01:04:05,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742411_1587 (size=349975) 2024-12-05T01:04:05,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742411_1587 (size=349975) 2024-12-05T01:04:06,698 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T01:04:07,377 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0011_000001 (auth:SIMPLE) from 127.0.0.1:54210 2024-12-05T01:04:07,378 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0011_000001 (auth:SIMPLE) from 127.0.0.1:41954 2024-12-05T01:04:11,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742412_1588 (size=5288) 2024-12-05T01:04:11,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742412_1588 (size=5288) 2024-12-05T01:04:11,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742412_1588 (size=5288) 2024-12-05T01:04:11,172 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_3/usercache/jenkins/appcache/application_1733360294895_0011/container_1733360294895_0011_01_000003/launch_container.sh] 2024-12-05T01:04:11,172 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_3/usercache/jenkins/appcache/application_1733360294895_0011/container_1733360294895_0011_01_000003/container_tokens] 2024-12-05T01:04:11,173 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_3/usercache/jenkins/appcache/application_1733360294895_0011/container_1733360294895_0011_01_000003/sysfs] 2024-12-05T01:04:11,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742414_1590 (size=8324) 2024-12-05T01:04:11,775 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742414_1590 (size=8324) 2024-12-05T01:04:11,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742414_1590 (size=8324) 2024-12-05T01:04:11,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742413_1589 (size=22220) 2024-12-05T01:04:11,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742413_1589 (size=22220) 2024-12-05T01:04:11,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742413_1589 (size=22220) 2024-12-05T01:04:11,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742415_1591 (size=476) 2024-12-05T01:04:11,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742415_1591 (size=476) 2024-12-05T01:04:11,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742415_1591 (size=476) 2024-12-05T01:04:11,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742416_1592 (size=22220) 2024-12-05T01:04:11,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742416_1592 (size=22220) 2024-12-05T01:04:11,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742416_1592 (size=22220) 2024-12-05T01:04:11,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742417_1593 (size=349975) 2024-12-05T01:04:11,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742417_1593 (size=349975) 2024-12-05T01:04:11,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742417_1593 (size=349975) 2024-12-05T01:04:11,956 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733360294895_0011_000001 (auth:SIMPLE) from 127.0.0.1:37738 2024-12-05T01:04:11,996 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 67df7793993e4d091a25423aa873fa7a changed from -1.0 to 0.0, refreshing cache 2024-12-05T01:04:11,996 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 29254d26df66b48412cf870ed83cb900 changed from -1.0 to 0.0, refreshing cache 2024-12-05T01:04:11,996 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region e2515d6c42ef02f66faaeadf8ba734e1 changed from -1.0 to 0.0, refreshing cache 2024-12-05T01:04:11,996 DEBUG [master/2113c16e5528:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-05T01:04:13,988 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T01:04:13,988 INFO 
[Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-05T01:04:13,993 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:13,994 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T01:04:13,994 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T01:04:13,994 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:13,994 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-05T01:04:13,994 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-05T01:04:13,994 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_720474631_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360633412/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360633412/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:13,995 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360633412/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-05T01:04:13,995 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/export-test/export-1733360633412/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-05T01:04:14,000 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=248, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-12-05T01:04:14,003 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360654003"}]},"ts":"1733360654003"} 2024-12-05T01:04:14,005 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated 
tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-05T01:04:14,005 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-05T01:04:14,006 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=249, ppid=248, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-05T01:04:14,008 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2515d6c42ef02f66faaeadf8ba734e1, UNASSIGN}, {pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=67df7793993e4d091a25423aa873fa7a, UNASSIGN}] 2024-12-05T01:04:14,008 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33369 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=2113c16e5528,41191,1733360288910, table=testtb-testExportFileSystemStateWithSkipTmp, region=e2515d6c42ef02f66faaeadf8ba734e1. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-05T01:04:14,009 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=67df7793993e4d091a25423aa873fa7a, UNASSIGN 2024-12-05T01:04:14,009 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2515d6c42ef02f66faaeadf8ba734e1, UNASSIGN 2024-12-05T01:04:14,010 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=250 updating hbase:meta row=e2515d6c42ef02f66faaeadf8ba734e1, regionState=CLOSING, regionLocation=2113c16e5528,41191,1733360288910 2024-12-05T01:04:14,010 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=251 updating hbase:meta row=67df7793993e4d091a25423aa873fa7a, regionState=CLOSING, regionLocation=2113c16e5528,32927,1733360288712 2024-12-05T01:04:14,011 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2515d6c42ef02f66faaeadf8ba734e1, UNASSIGN because future has completed 2024-12-05T01:04:14,012 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:04:14,012 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=252, ppid=250, state=RUNNABLE, hasLock=false; CloseRegionProcedure e2515d6c42ef02f66faaeadf8ba734e1, server=2113c16e5528,41191,1733360288910}] 2024-12-05T01:04:14,012 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; 
TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=67df7793993e4d091a25423aa873fa7a, UNASSIGN because future has completed 2024-12-05T01:04:14,012 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T01:04:14,012 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=253, ppid=251, state=RUNNABLE, hasLock=false; CloseRegionProcedure 67df7793993e4d091a25423aa873fa7a, server=2113c16e5528,32927,1733360288712}] 2024-12-05T01:04:14,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-12-05T01:04:14,164 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(122): Close e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:04:14,164 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:04:14,165 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1722): Closing e2515d6c42ef02f66faaeadf8ba734e1, disabling compactions & flushes 2024-12-05T01:04:14,165 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. 2024-12-05T01:04:14,165 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. 2024-12-05T01:04:14,165 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. after waiting 0 ms 2024-12-05T01:04:14,165 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. 2024-12-05T01:04:14,165 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(122): Close 67df7793993e4d091a25423aa873fa7a 2024-12-05T01:04:14,165 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T01:04:14,165 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1722): Closing 67df7793993e4d091a25423aa873fa7a, disabling compactions & flushes 2024-12-05T01:04:14,165 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. 
2024-12-05T01:04:14,165 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. 2024-12-05T01:04:14,165 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. after waiting 0 ms 2024-12-05T01:04:14,165 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. 2024-12-05T01:04:14,169 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:04:14,169 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T01:04:14,169 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:04:14,169 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:04:14,169 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1. 2024-12-05T01:04:14,169 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a. 
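The DisableTableProcedure and region-close records above are the server side of a plain client call; for reference, the client-side equivalent is roughly the sketch below (connection setup is a placeholder).

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits a DisableTableProcedure on the master and blocks until the table
      // reaches DISABLED, i.e. the pid=248..253 procedure chain logged above.
      admin.disableTable(table);
    }
  }
}
```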
2024-12-05T01:04:14,170 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1676): Region close journal for 67df7793993e4d091a25423aa873fa7a: Waiting for close lock at 1733360654165Running coprocessor pre-close hooks at 1733360654165Disabling compacts and flushes for region at 1733360654165Disabling writes for close at 1733360654165Writing region close event to WAL at 1733360654166 (+1 ms)Running coprocessor post-close hooks at 1733360654169 (+3 ms)Closed at 1733360654169 2024-12-05T01:04:14,170 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1676): Region close journal for e2515d6c42ef02f66faaeadf8ba734e1: Waiting for close lock at 1733360654164Running coprocessor pre-close hooks at 1733360654164Disabling compacts and flushes for region at 1733360654164Disabling writes for close at 1733360654165 (+1 ms)Writing region close event to WAL at 1733360654165Running coprocessor post-close hooks at 1733360654169 (+4 ms)Closed at 1733360654169 2024-12-05T01:04:14,172 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(157): Closed 67df7793993e4d091a25423aa873fa7a 2024-12-05T01:04:14,172 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=251 updating hbase:meta row=67df7793993e4d091a25423aa873fa7a, regionState=CLOSED 2024-12-05T01:04:14,173 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(157): Closed e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:04:14,173 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=250 updating hbase:meta row=e2515d6c42ef02f66faaeadf8ba734e1, regionState=CLOSED 2024-12-05T01:04:14,174 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=253, ppid=251, state=RUNNABLE, hasLock=false; CloseRegionProcedure 67df7793993e4d091a25423aa873fa7a, server=2113c16e5528,32927,1733360288712 because future has completed 2024-12-05T01:04:14,175 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=252, ppid=250, state=RUNNABLE, hasLock=false; CloseRegionProcedure e2515d6c42ef02f66faaeadf8ba734e1, server=2113c16e5528,41191,1733360288910 because future has completed 2024-12-05T01:04:14,176 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=253, resume processing ppid=251 2024-12-05T01:04:14,177 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=253, ppid=251, state=SUCCESS, hasLock=false; CloseRegionProcedure 67df7793993e4d091a25423aa873fa7a, server=2113c16e5528,32927,1733360288712 in 163 msec 2024-12-05T01:04:14,178 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=252, resume processing ppid=250 2024-12-05T01:04:14,178 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=252, ppid=250, state=SUCCESS, hasLock=false; CloseRegionProcedure e2515d6c42ef02f66faaeadf8ba734e1, server=2113c16e5528,41191,1733360288910 in 164 msec 2024-12-05T01:04:14,178 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=251, ppid=249, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=67df7793993e4d091a25423aa873fa7a, UNASSIGN in 168 msec 2024-12-05T01:04:14,179 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=250, resume processing ppid=249 2024-12-05T01:04:14,179 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=250, ppid=249, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2515d6c42ef02f66faaeadf8ba734e1, UNASSIGN in 170 msec 2024-12-05T01:04:14,182 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=249, resume processing ppid=248 2024-12-05T01:04:14,182 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=249, ppid=248, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 174 msec 2024-12-05T01:04:14,183 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733360654183"}]},"ts":"1733360654183"} 2024-12-05T01:04:14,184 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-05T01:04:14,184 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-05T01:04:14,186 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 185 msec 2024-12-05T01:04:14,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-12-05T01:04:14,322 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-05T01:04:14,322 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] procedure2.ProcedureExecutor(1139): Stored pid=254, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,325 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=254, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,326 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=254, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,330 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:04:14,331 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41191 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry 
testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,332 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1/recovered.edits] 2024-12-05T01:04:14,335 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1/cf/5f2ed8c3cdb14e75b3a85bc6c0e503bb to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1/cf/5f2ed8c3cdb14e75b3a85bc6c0e503bb 2024-12-05T01:04:14,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,337 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-05T01:04:14,337 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-05T01:04:14,337 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-05T01:04:14,337 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-05T01:04:14,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/acl 2024-12-05T01:04:14,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:04:14,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:04:14,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T01:04:14,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=254 2024-12-05T01:04:14,339 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:04:14,340 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:04:14,340 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a 2024-12-05T01:04:14,342 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1/recovered.edits/9.seqid 2024-12-05T01:04:14,342 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:04:14,342 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): 
Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T01:04:14,343 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a/cf, FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a/recovered.edits] 2024-12-05T01:04:14,343 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2515d6c42ef02f66faaeadf8ba734e1 2024-12-05T01:04:14,345 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a/cf/400df223d79c40408372a6fb3f7465d1 to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a/cf/400df223d79c40408372a6fb3f7465d1 2024-12-05T01:04:14,348 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a/recovered.edits/9.seqid to hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a/recovered.edits/9.seqid 2024-12-05T01:04:14,348 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testtb-testExportFileSystemStateWithSkipTmp/67df7793993e4d091a25423aa873fa7a 2024-12-05T01:04:14,349 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-05T01:04:14,351 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=254, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,354 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-05T01:04:14,356 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-05T01:04:14,357 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=254, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,358 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 
2024-12-05T01:04:14,358 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360654358"}]},"ts":"9223372036854775807"} 2024-12-05T01:04:14,358 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733360654358"}]},"ts":"9223372036854775807"} 2024-12-05T01:04:14,360 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T01:04:14,360 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => e2515d6c42ef02f66faaeadf8ba734e1, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733360631642.e2515d6c42ef02f66faaeadf8ba734e1.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 67df7793993e4d091a25423aa873fa7a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T01:04:14,360 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 2024-12-05T01:04:14,361 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733360654360"}]},"ts":"9223372036854775807"} 2024-12-05T01:04:14,362 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-05T01:04:14,363 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=254, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,364 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=254, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 40 msec 2024-12-05T01:04:14,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=254 2024-12-05T01:04:14,442 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,442 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-05T01:04:14,448 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-05T01:04:14,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,450 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-05T01:04:14,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369 {}] snapshot.SnapshotManager(381): Deleting 
snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:14,476 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=833 (was 824) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34877 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-26 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-27 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1562846184_22 at /127.0.0.1:42706 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-8831 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:56182 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:46208 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:42708 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 18123) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:56174 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:34877 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-894754792_1 at /127.0.0.1:46194 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-894754792_1 at /127.0.0.1:42664 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_720474631_22 at /127.0.0.1:42694 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=845 (was 843) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=786 (was 767) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 20), AvailableMemoryMB=4535 (was 4721) 2024-12-05T01:04:14,476 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=833 is superior to 500 2024-12-05T01:04:14,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 
2024-12-05T01:04:14,485 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@375a1889{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-05T01:04:14,488 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5892eb52{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T01:04:14,488 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T01:04:14,489 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@12959846{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-05T01:04:14,489 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1042ee3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir/,STOPPED} 2024-12-05T01:04:14,507 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733360294895_0011_01_000001 is : 143 2024-12-05T01:04:14,517 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_1/usercache/jenkins/appcache/application_1733360294895_0011/container_1733360294895_0011_01_000001/launch_container.sh] 2024-12-05T01:04:14,517 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_1/usercache/jenkins/appcache/application_1733360294895_0011/container_1733360294895_0011_01_000001/container_tokens] 2024-12-05T01:04:14,517 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-1_1/usercache/jenkins/appcache/application_1733360294895_0011/container_1733360294895_0011_01_000001/sysfs] 2024-12-05T01:04:17,021 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0011/container_1733360294895_0011_01_000002/launch_container.sh] 2024-12-05T01:04:17,021 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0011/container_1733360294895_0011_01_000002/container_tokens] 2024-12-05T01:04:17,021 WARN [ContainersLauncher #2 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_838850789/yarn-6444612222/MiniMRCluster_838850789-localDir-nm-0_0/usercache/jenkins/appcache/application_1733360294895_0011/container_1733360294895_0011_01_000002/sysfs] 2024-12-05T01:04:18,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T01:04:19,707 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:04:31,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5419c6a1{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-05T01:04:31,503 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6af26d81{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T01:04:31,503 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T01:04:31,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6417a2a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-05T01:04:31,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a3e1db4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir/,STOPPED} 2024-12-05T01:04:36,698 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-05T01:04:42,825 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 42720 2024-12-05T01:04:43,987 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 29254d26df66b48412cf870ed83cb900, had cached 0 bytes from a total of 5791 2024-12-05T01:04:44,278 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 880916148c83f94d943041b7127cffb7, had cached 0 bytes from a total of 5422 2024-12-05T01:04:44,278 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 8bbf90773c039aafe370b8325f074c96, had cached 0 bytes from a total of 8188 2024-12-05T01:04:48,510 ERROR [Thread[Thread-400,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-05T01:04:48,511 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@18514ef4{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-05T01:04:48,511 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3a9525b1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T01:04:48,511 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T01:04:48,511 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@269a259d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-05T01:04:48,512 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c85584d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir/,STOPPED} 2024-12-05T01:04:48,515 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 
2024-12-05T01:04:48,520 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-05T01:04:48,521 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-05T01:04:48,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741830_1006 (size=1178087) 2024-12-05T01:04:48,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741830_1006 (size=1178087) 2024-12-05T01:04:48,528 ERROR [Thread[Thread-423,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-05T01:04:48,531 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ca5085a{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-05T01:04:48,531 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@613db7da{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T01:04:48,531 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T01:04:48,532 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@191ba172{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-05T01:04:48,532 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c55a185{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir/,STOPPED} 2024-12-05T01:04:48,533 ERROR [Thread[Thread-382,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-05T01:04:48,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-12-05T01:04:48,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-05T01:04:48,533 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T01:04:48,533 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T01:04:48,533 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:04:48,534 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:04:48,534 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T01:04:48,534 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1370926998, stopped=false 2024-12-05T01:04:48,534 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:04:48,534 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-05T01:04:48,534 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2113c16e5528,33369,1733360287836 2024-12-05T01:04:48,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T01:04:48,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/running 2024-12-05T01:04:48,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T01:04:48,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T01:04:48,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T01:04:48,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T01:04:48,536 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T01:04:48,537 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T01:04:48,537 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-05T01:04:48,537 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T01:04:48,537 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T01:04:48,537 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at 
java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T01:04:48,537 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:04:48,537 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2113c16e5528,32927,1733360288712' ***** 2024-12-05T01:04:48,538 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:04:48,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T01:04:48,538 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T01:04:48,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T01:04:48,538 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T01:04:48,538 INFO [RS:0;2113c16e5528:32927 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T01:04:48,538 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2113c16e5528,44435,1733360288855' ***** 2024-12-05T01:04:48,538 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:04:48,538 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T01:04:48,538 INFO [RS:0;2113c16e5528:32927 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T01:04:48,538 INFO [RS:0;2113c16e5528:32927 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-05T01:04:48,539 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T01:04:48,539 INFO [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(3091): Received CLOSE for 8bbf90773c039aafe370b8325f074c96 2024-12-05T01:04:48,539 INFO [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(959): stopping server 2113c16e5528,32927,1733360288712 2024-12-05T01:04:48,539 INFO [RS:0;2113c16e5528:32927 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T01:04:48,539 INFO [RS:0;2113c16e5528:32927 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2113c16e5528:32927. 2024-12-05T01:04:48,539 DEBUG [RS:0;2113c16e5528:32927 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T01:04:48,539 DEBUG [RS:0;2113c16e5528:32927 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:04:48,539 INFO [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-05T01:04:48,539 DEBUG [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(1325): Online Regions={8bbf90773c039aafe370b8325f074c96=testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96.} 2024-12-05T01:04:48,539 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8bbf90773c039aafe370b8325f074c96, disabling compactions & flushes 2024-12-05T01:04:48,539 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. 2024-12-05T01:04:48,539 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. 2024-12-05T01:04:48,539 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. 
after waiting 0 ms 2024-12-05T01:04:48,539 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. 2024-12-05T01:04:48,540 DEBUG [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(1351): Waiting on 8bbf90773c039aafe370b8325f074c96 2024-12-05T01:04:48,540 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T01:04:48,540 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2113c16e5528,41191,1733360288910' ***** 2024-12-05T01:04:48,540 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:04:48,540 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T01:04:48,540 INFO [RS:1;2113c16e5528:44435 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T01:04:48,540 INFO [RS:1;2113c16e5528:44435 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T01:04:48,540 INFO [RS:1;2113c16e5528:44435 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T01:04:48,540 INFO [RS:2;2113c16e5528:41191 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T01:04:48,540 INFO [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(959): stopping server 2113c16e5528,44435,1733360288855 2024-12-05T01:04:48,540 INFO [RS:1;2113c16e5528:44435 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T01:04:48,540 INFO [RS:2;2113c16e5528:41191 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T01:04:48,540 INFO [RS:2;2113c16e5528:41191 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T01:04:48,540 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T01:04:48,540 INFO [RS:1;2113c16e5528:44435 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;2113c16e5528:44435. 
2024-12-05T01:04:48,540 DEBUG [RS:1;2113c16e5528:44435 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T01:04:48,540 INFO [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(3091): Received CLOSE for 880916148c83f94d943041b7127cffb7 2024-12-05T01:04:48,540 DEBUG [RS:1;2113c16e5528:44435 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:04:48,540 INFO [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(3091): Received CLOSE for 29254d26df66b48412cf870ed83cb900 2024-12-05T01:04:48,540 INFO [RS:1;2113c16e5528:44435 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T01:04:48,540 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 880916148c83f94d943041b7127cffb7, disabling compactions & flushes 2024-12-05T01:04:48,540 INFO [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(959): stopping server 2113c16e5528,41191,1733360288910 2024-12-05T01:04:48,540 INFO [RS:1;2113c16e5528:44435 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T01:04:48,540 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. 2024-12-05T01:04:48,540 INFO [RS:2;2113c16e5528:41191 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T01:04:48,540 INFO [RS:1;2113c16e5528:44435 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T01:04:48,540 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. 2024-12-05T01:04:48,540 INFO [RS:2;2113c16e5528:41191 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;2113c16e5528:41191. 2024-12-05T01:04:48,540 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. 
after waiting 0 ms 2024-12-05T01:04:48,540 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. 2024-12-05T01:04:48,540 INFO [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T01:04:48,540 DEBUG [RS:2;2113c16e5528:41191 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T01:04:48,540 DEBUG [RS:2;2113c16e5528:41191 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:04:48,541 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T01:04:48,541 INFO [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-05T01:04:48,541 DEBUG [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(1325): Online Regions={880916148c83f94d943041b7127cffb7=testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7., 29254d26df66b48412cf870ed83cb900=hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900.} 2024-12-05T01:04:48,541 DEBUG [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(1351): Waiting on 29254d26df66b48412cf870ed83cb900, 880916148c83f94d943041b7127cffb7 2024-12-05T01:04:48,541 INFO [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-05T01:04:48,541 DEBUG [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-05T01:04:48,541 DEBUG [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-05T01:04:48,541 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T01:04:48,541 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T01:04:48,541 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on 
hbase:meta,,1.1588230740 2024-12-05T01:04:48,541 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T01:04:48,541 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T01:04:48,542 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=10.15 KB heapSize=17.11 KB 2024-12-05T01:04:48,544 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/8bbf90773c039aafe370b8325f074c96/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T01:04:48,544 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/default/testExportExpiredSnapshot/880916148c83f94d943041b7127cffb7/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T01:04:48,545 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:04:48,545 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:04:48,545 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. 2024-12-05T01:04:48,545 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. 2024-12-05T01:04:48,545 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8bbf90773c039aafe370b8325f074c96: Waiting for close lock at 1733360688539Running coprocessor pre-close hooks at 1733360688539Disabling compacts and flushes for region at 1733360688539Disabling writes for close at 1733360688539Writing region close event to WAL at 1733360688541 (+2 ms)Running coprocessor post-close hooks at 1733360688545 (+4 ms)Closed at 1733360688545 2024-12-05T01:04:48,545 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 880916148c83f94d943041b7127cffb7: Waiting for close lock at 1733360688540Running coprocessor pre-close hooks at 1733360688540Disabling compacts and flushes for region at 1733360688540Disabling writes for close at 1733360688540Writing region close event to WAL at 1733360688541 (+1 ms)Running coprocessor post-close hooks at 1733360688545 (+4 ms)Closed at 1733360688545 2024-12-05T01:04:48,545 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1733360548943.8bbf90773c039aafe370b8325f074c96. 
2024-12-05T01:04:48,545 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1733360548943.880916148c83f94d943041b7127cffb7. 2024-12-05T01:04:48,545 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 29254d26df66b48412cf870ed83cb900, disabling compactions & flushes 2024-12-05T01:04:48,545 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 2024-12-05T01:04:48,545 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 2024-12-05T01:04:48,545 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. after waiting 0 ms 2024-12-05T01:04:48,545 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 2024-12-05T01:04:48,545 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 29254d26df66b48412cf870ed83cb900 1/1 column families, dataSize=190 B heapSize=672 B 2024-12-05T01:04:48,548 INFO [regionserver/2113c16e5528:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T01:04:48,560 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900/.tmp/l/80c5c6387e8e49eb8fbbca1bf62386c8 is 68, key is testtb-testExportFileSystemStateWithSkipTmp/l:/1733360654326/DeleteFamily/seqid=0 2024-12-05T01:04:48,564 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/info/f1feb0d6bd004d969dae731258ce217a is 131, key is hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900./info:regioninfo/1733360594017/Put/seqid=0 2024-12-05T01:04:48,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742418_1594 (size=5142) 2024-12-05T01:04:48,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742418_1594 (size=5142) 2024-12-05T01:04:48,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742418_1594 (size=5142) 2024-12-05T01:04:48,565 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=190 B at sequenceid=37 (bloomFilter=false), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900/.tmp/l/80c5c6387e8e49eb8fbbca1bf62386c8 2024-12-05T01:04:48,568 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742419_1595 (size=7555) 2024-12-05T01:04:48,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742419_1595 (size=7555) 2024-12-05T01:04:48,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742419_1595 (size=7555) 2024-12-05T01:04:48,569 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.30 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/info/f1feb0d6bd004d969dae731258ce217a 2024-12-05T01:04:48,571 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 80c5c6387e8e49eb8fbbca1bf62386c8 2024-12-05T01:04:48,572 INFO [regionserver/2113c16e5528:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T01:04:48,572 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900/.tmp/l/80c5c6387e8e49eb8fbbca1bf62386c8 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900/l/80c5c6387e8e49eb8fbbca1bf62386c8 2024-12-05T01:04:48,576 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 80c5c6387e8e49eb8fbbca1bf62386c8 2024-12-05T01:04:48,576 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900/l/80c5c6387e8e49eb8fbbca1bf62386c8, entries=2, sequenceid=37, filesize=5.0 K 2024-12-05T01:04:48,577 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~190 B/190, heapSize ~656 B/656, currentSize=0 B/0 for 29254d26df66b48412cf870ed83cb900 in 32ms, sequenceid=37, compaction requested=false 2024-12-05T01:04:48,577 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-05T01:04:48,580 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/acl/29254d26df66b48412cf870ed83cb900/recovered.edits/40.seqid, newMaxSeqId=40, maxSeqId=31 2024-12-05T01:04:48,580 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:04:48,580 INFO [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 2024-12-05T01:04:48,580 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 29254d26df66b48412cf870ed83cb900: Waiting for close lock at 1733360688545Running coprocessor pre-close hooks at 1733360688545Disabling compacts and flushes for region at 1733360688545Disabling writes for close at 1733360688545Obtaining lock to block concurrent updates at 1733360688545Preparing flush snapshotting stores in 29254d26df66b48412cf870ed83cb900 at 1733360688545Finished memstore snapshotting hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900., syncing WAL and waiting on mvcc, flushsize=dataSize=190, getHeapSize=656, getOffHeapSize=0, getCellsCount=3 at 1733360688546 (+1 ms)Flushing stores of hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. at 1733360688546Flushing 29254d26df66b48412cf870ed83cb900/l: creating writer at 1733360688546Flushing 29254d26df66b48412cf870ed83cb900/l: appending metadata at 1733360688560 (+14 ms)Flushing 29254d26df66b48412cf870ed83cb900/l: closing flushed file at 1733360688560Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@64663385: reopening flushed file at 1733360688571 (+11 ms)Finished flush of dataSize ~190 B/190, heapSize ~656 B/656, currentSize=0 B/0 for 29254d26df66b48412cf870ed83cb900 in 32ms, sequenceid=37, compaction requested=false at 1733360688577 (+6 ms)Writing region close event to WAL at 1733360688578 (+1 ms)Running coprocessor post-close hooks at 1733360688580 (+2 ms)Closed at 1733360688580 2024-12-05T01:04:48,581 DEBUG [RS_CLOSE_REGION-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1733360291417.29254d26df66b48412cf870ed83cb900. 
2024-12-05T01:04:48,586 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/ns/061880a20ebe442690a5ad660d1aced6 is 119, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a./ns:/1733360654351/DeleteFamily/seqid=0 2024-12-05T01:04:48,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742420_1596 (size=5927) 2024-12-05T01:04:48,590 INFO [regionserver/2113c16e5528:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T01:04:48,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742420_1596 (size=5927) 2024-12-05T01:04:48,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742420_1596 (size=5927) 2024-12-05T01:04:48,591 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=430 B at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/ns/061880a20ebe442690a5ad660d1aced6 2024-12-05T01:04:48,608 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/rep_barrier/f0085278d21649808367c3bc16923454 is 128, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a./rep_barrier:/1733360654351/DeleteFamily/seqid=0 2024-12-05T01:04:48,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742421_1597 (size=5990) 2024-12-05T01:04:48,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742421_1597 (size=5990) 2024-12-05T01:04:48,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742421_1597 (size=5990) 2024-12-05T01:04:48,613 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=466 B at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/rep_barrier/f0085278d21649808367c3bc16923454 2024-12-05T01:04:48,630 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/table/2b83552d5861440ab272d0e729d3b893 is 122, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733360631642.67df7793993e4d091a25423aa873fa7a./table:/1733360654351/DeleteFamily/seqid=0 2024-12-05T01:04:48,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742422_1598 (size=6012) 2024-12-05T01:04:48,633 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742422_1598 (size=6012) 2024-12-05T01:04:48,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742422_1598 (size=6012) 2024-12-05T01:04:48,634 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=996 B at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/table/2b83552d5861440ab272d0e729d3b893 2024-12-05T01:04:48,638 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/info/f1feb0d6bd004d969dae731258ce217a as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/info/f1feb0d6bd004d969dae731258ce217a 2024-12-05T01:04:48,642 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/info/f1feb0d6bd004d969dae731258ce217a, entries=19, sequenceid=246, filesize=7.4 K 2024-12-05T01:04:48,642 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/ns/061880a20ebe442690a5ad660d1aced6 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/ns/061880a20ebe442690a5ad660d1aced6 2024-12-05T01:04:48,646 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/ns/061880a20ebe442690a5ad660d1aced6, entries=4, sequenceid=246, filesize=5.8 K 2024-12-05T01:04:48,647 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/rep_barrier/f0085278d21649808367c3bc16923454 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/rep_barrier/f0085278d21649808367c3bc16923454 2024-12-05T01:04:48,650 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/rep_barrier/f0085278d21649808367c3bc16923454, entries=4, sequenceid=246, filesize=5.8 K 2024-12-05T01:04:48,651 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/.tmp/table/2b83552d5861440ab272d0e729d3b893 as hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/table/2b83552d5861440ab272d0e729d3b893 
2024-12-05T01:04:48,654 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/table/2b83552d5861440ab272d0e729d3b893, entries=6, sequenceid=246, filesize=5.9 K 2024-12-05T01:04:48,655 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~10.15 KB/10395, heapSize ~17.05 KB/17456, currentSize=0 B/0 for 1588230740 in 114ms, sequenceid=246, compaction requested=false 2024-12-05T01:04:48,655 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-05T01:04:48,659 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/data/hbase/meta/1588230740/recovered.edits/249.seqid, newMaxSeqId=249, maxSeqId=214 2024-12-05T01:04:48,659 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:04:48,659 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T01:04:48,660 INFO [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T01:04:48,660 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733360688541Running coprocessor pre-close hooks at 1733360688541Disabling compacts and flushes for region at 1733360688541Disabling writes for close at 1733360688541Obtaining lock to block concurrent updates at 1733360688542 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733360688542Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=10395, getHeapSize=17456, getOffHeapSize=0, getCellsCount=81 at 1733360688542Flushing stores of hbase:meta,,1.1588230740 at 1733360688542Flushing 1588230740/info: creating writer at 1733360688543 (+1 ms)Flushing 1588230740/info: appending metadata at 1733360688563 (+20 ms)Flushing 1588230740/info: closing flushed file at 1733360688564 (+1 ms)Flushing 1588230740/ns: creating writer at 1733360688573 (+9 ms)Flushing 1588230740/ns: appending metadata at 1733360688586 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1733360688586Flushing 1588230740/rep_barrier: creating writer at 1733360688594 (+8 ms)Flushing 1588230740/rep_barrier: appending metadata at 1733360688607 (+13 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1733360688607Flushing 1588230740/table: creating writer at 1733360688616 (+9 ms)Flushing 1588230740/table: appending metadata at 1733360688629 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733360688629Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f5a0aa1: reopening flushed file at 1733360688637 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@21c9798a: reopening flushed file at 1733360688642 (+5 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1549095c: reopening flushed file at 1733360688646 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@396286a5: reopening flushed file at 1733360688650 (+4 ms)Finished flush of dataSize ~10.15 KB/10395, heapSize ~17.05 KB/17456, currentSize=0 B/0 for 1588230740 in 114ms, sequenceid=246, compaction requested=false at 1733360688655 (+5 ms)Writing region close event to WAL at 1733360688657 (+2 ms)Running coprocessor post-close hooks at 1733360688659 (+2 ms)Closed at 1733360688660 (+1 ms) 2024-12-05T01:04:48,660 DEBUG [RS_CLOSE_META-regionserver/2113c16e5528:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-05T01:04:48,740 INFO [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(976): stopping server 2113c16e5528,32927,1733360288712; all regions closed. 2024-12-05T01:04:48,741 INFO [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(976): stopping server 2113c16e5528,41191,1733360288910; all regions closed. 2024-12-05T01:04:48,741 INFO [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(976): stopping server 2113c16e5528,44435,1733360288855; all regions closed. 2024-12-05T01:04:48,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742326_1502 (size=13036) 2024-12-05T01:04:48,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741836_1012 (size=89810) 2024-12-05T01:04:48,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741835_1011 (size=13602) 2024-12-05T01:04:48,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741836_1012 (size=89810) 2024-12-05T01:04:48,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741836_1012 (size=89810) 2024-12-05T01:04:48,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741835_1011 (size=13602) 2024-12-05T01:04:48,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742326_1502 (size=13036) 2024-12-05T01:04:48,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741835_1011 (size=13602) 2024-12-05T01:04:48,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742326_1502 (size=13036) 2024-12-05T01:04:48,749 DEBUG [RS:2;2113c16e5528:41191 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/oldWALs 2024-12-05T01:04:48,749 DEBUG [RS:0;2113c16e5528:32927 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/oldWALs 2024-12-05T01:04:48,749 DEBUG [RS:1;2113c16e5528:44435 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/oldWALs 2024-12-05T01:04:48,749 INFO [RS:2;2113c16e5528:41191 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2113c16e5528%2C41191%2C1733360288910:(num 1733360290654) 2024-12-05T01:04:48,749 INFO 
[RS:0;2113c16e5528:32927 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2113c16e5528%2C32927%2C1733360288712.meta:.meta(num 1733360291065) 2024-12-05T01:04:48,749 INFO [RS:1;2113c16e5528:44435 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2113c16e5528%2C44435%2C1733360288855.meta:.meta(num 1733360592793) 2024-12-05T01:04:48,749 DEBUG [RS:2;2113c16e5528:41191 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:04:48,749 INFO [RS:2;2113c16e5528:41191 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T01:04:48,749 INFO [RS:2;2113c16e5528:41191 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T01:04:48,750 INFO [RS:2;2113c16e5528:41191 {}] hbase.ChoreService(370): Chore service for: regionserver/2113c16e5528:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T01:04:48,750 INFO [RS:2;2113c16e5528:41191 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T01:04:48,750 INFO [RS:2;2113c16e5528:41191 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T01:04:48,750 INFO [RS:2;2113c16e5528:41191 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T01:04:48,750 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T01:04:48,750 INFO [RS:2;2113c16e5528:41191 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T01:04:48,750 INFO [RS:2;2113c16e5528:41191 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41191 2024-12-05T01:04:48,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741834_1010 (size=10886) 2024-12-05T01:04:48,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T01:04:48,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741834_1010 (size=10886) 2024-12-05T01:04:48,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2113c16e5528,41191,1733360288910 2024-12-05T01:04:48,755 INFO [RS:2;2113c16e5528:41191 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T01:04:48,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741833_1009 (size=19658) 2024-12-05T01:04:48,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073741833_1009 (size=19658) 2024-12-05T01:04:48,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741833_1009 (size=19658) 2024-12-05T01:04:48,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073741834_1010 (size=10886) 2024-12-05T01:04:48,756 INFO [RegionServerTracker-0 {}] 
master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2113c16e5528,41191,1733360288910] 2024-12-05T01:04:48,759 DEBUG [RS:1;2113c16e5528:44435 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/oldWALs 2024-12-05T01:04:48,759 INFO [RS:1;2113c16e5528:44435 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2113c16e5528%2C44435%2C1733360288855:(num 1733360290649) 2024-12-05T01:04:48,759 DEBUG [RS:1;2113c16e5528:44435 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:04:48,759 INFO [RS:1;2113c16e5528:44435 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T01:04:48,759 DEBUG [RS:0;2113c16e5528:32927 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/oldWALs 2024-12-05T01:04:48,759 INFO [RS:0;2113c16e5528:32927 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2113c16e5528%2C32927%2C1733360288712:(num 1733360290637) 2024-12-05T01:04:48,759 DEBUG [RS:0;2113c16e5528:32927 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T01:04:48,759 INFO [RS:0;2113c16e5528:32927 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T01:04:48,759 INFO [RS:1;2113c16e5528:44435 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T01:04:48,759 INFO [RS:0;2113c16e5528:32927 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T01:04:48,759 INFO [RS:1;2113c16e5528:44435 {}] hbase.ChoreService(370): Chore service for: regionserver/2113c16e5528:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T01:04:48,759 INFO [RS:0;2113c16e5528:32927 {}] hbase.ChoreService(370): Chore service for: regionserver/2113c16e5528:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T01:04:48,759 INFO [RS:1;2113c16e5528:44435 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T01:04:48,759 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T01:04:48,760 INFO [RS:0;2113c16e5528:32927 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T01:04:48,760 INFO [RS:0;2113c16e5528:32927 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T01:04:48,760 INFO [RS:0;2113c16e5528:32927 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T01:04:48,760 INFO [RS:0;2113c16e5528:32927 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T01:04:48,760 INFO [RS:1;2113c16e5528:44435 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44435 2024-12-05T01:04:48,760 INFO [regionserver/2113c16e5528:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T01:04:48,760 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2113c16e5528,41191,1733360288910 already deleted, retry=false 2024-12-05T01:04:48,761 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2113c16e5528,41191,1733360288910 expired; onlineServers=2 2024-12-05T01:04:48,761 INFO [RS:0;2113c16e5528:32927 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32927 2024-12-05T01:04:48,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2113c16e5528,32927,1733360288712 2024-12-05T01:04:48,763 INFO [RS:0;2113c16e5528:32927 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T01:04:48,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T01:04:48,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2113c16e5528,44435,1733360288855 2024-12-05T01:04:48,763 INFO [RS:1;2113c16e5528:44435 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T01:04:48,765 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2113c16e5528,32927,1733360288712] 2024-12-05T01:04:48,772 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2113c16e5528,32927,1733360288712 already deleted, retry=false 2024-12-05T01:04:48,772 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2113c16e5528,32927,1733360288712 expired; onlineServers=1 2024-12-05T01:04:48,772 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2113c16e5528,44435,1733360288855] 2024-12-05T01:04:48,774 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2113c16e5528,44435,1733360288855 already deleted, retry=false 2024-12-05T01:04:48,774 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2113c16e5528,44435,1733360288855 expired; onlineServers=0 2024-12-05T01:04:48,774 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2113c16e5528,33369,1733360287836' ***** 2024-12-05T01:04:48,774 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-05T01:04:48,774 INFO [M:0;2113c16e5528:33369 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T01:04:48,774 INFO [M:0;2113c16e5528:33369 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T01:04:48,774 DEBUG [M:0;2113c16e5528:33369 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T01:04:48,775 DEBUG [M:0;2113c16e5528:33369 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T01:04:48,775 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-05T01:04:48,775 DEBUG [master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733360290235 {}] cleaner.HFileCleaner(306): Exit Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.large.0-1733360290235,5,FailOnTimeoutGroup]
2024-12-05T01:04:48,775 DEBUG [master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733360290236 {}] cleaner.HFileCleaner(306): Exit Thread[master/2113c16e5528:0:becomeActiveMaster-HFileCleaner.small.0-1733360290236,5,FailOnTimeoutGroup]
2024-12-05T01:04:48,775 INFO [M:0;2113c16e5528:33369 {}] hbase.ChoreService(370): Chore service for: master/2113c16e5528:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-05T01:04:48,775 INFO [M:0;2113c16e5528:33369 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-05T01:04:48,775 DEBUG [M:0;2113c16e5528:33369 {}] master.HMaster(1795): Stopping service threads
2024-12-05T01:04:48,775 INFO [M:0;2113c16e5528:33369 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-05T01:04:48,775 INFO [M:0;2113c16e5528:33369 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-05T01:04:48,776 INFO [M:0;2113c16e5528:33369 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-05T01:04:48,776 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-05T01:04:48,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-05T01:04:48,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T01:04:48,776 DEBUG [M:0;2113c16e5528:33369 {}] zookeeper.ZKUtil(347): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-05T01:04:48,777 WARN [M:0;2113c16e5528:33369 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-05T01:04:48,778 INFO [M:0;2113c16e5528:33369 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/.lastflushedseqids
2024-12-05T01:04:48,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41667 is added to blk_1073742423_1599 (size=333)
2024-12-05T01:04:48,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073742423_1599 (size=333)
2024-12-05T01:04:48,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46149 is added to blk_1073742423_1599 (size=333)
2024-12-05T01:04:48,790 INFO [M:0;2113c16e5528:33369 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-05T01:04:48,790 INFO [M:0;2113c16e5528:33369 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-05T01:04:48,790 DEBUG [M:0;2113c16e5528:33369 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-05T01:04:48,803 INFO [M:0;2113c16e5528:33369 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-05T01:04:48,803 DEBUG [M:0;2113c16e5528:33369 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-05T01:04:48,803 DEBUG [M:0;2113c16e5528:33369 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-05T01:04:48,803 DEBUG [M:0;2113c16e5528:33369 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-05T01:04:48,803 INFO [M:0;2113c16e5528:33369 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=1001.35 KB heapSize=1.17 MB
2024-12-05T01:04:48,804 ERROR [AsyncFSWAL-0-hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData-prefix:2113c16e5528,33369,1733360287836 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData-prefix:2113c16e5528,33369,1733360287836,5,FailOnTimeoutGroup] died
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-05T01:04:48,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T01:04:48,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41191-0x1018020e6da0003, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T01:04:48,860 INFO [RS:2;2113c16e5528:41191 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-05T01:04:48,860 INFO [RS:2;2113c16e5528:41191 {}] regionserver.HRegionServer(1031): Exiting; stopping=2113c16e5528,41191,1733360288910; zookeeper connection closed.
2024-12-05T01:04:48,860 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@643bf2ef {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@643bf2ef 2024-12-05T01:04:48,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T01:04:48,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32927-0x1018020e6da0001, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T01:04:48,865 INFO [RS:0;2113c16e5528:32927 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T01:04:48,865 INFO [RS:0;2113c16e5528:32927 {}] regionserver.HRegionServer(1031): Exiting; stopping=2113c16e5528,32927,1733360288712; zookeeper connection closed. 2024-12-05T01:04:48,865 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@623d97f7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@623d97f7 2024-12-05T01:04:48,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T01:04:48,865 INFO [RS:1;2113c16e5528:44435 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T01:04:48,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44435-0x1018020e6da0002, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T01:04:48,865 INFO [RS:1;2113c16e5528:44435 {}] regionserver.HRegionServer(1031): Exiting; stopping=2113c16e5528,44435,1733360288855; zookeeper connection closed. 
2024-12-05T01:04:48,866 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3cea7d96 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3cea7d96 2024-12-05T01:04:48,866 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-05T01:04:51,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37797 is added to blk_1073741830_1006 (size=1178087) 2024-12-05T01:04:54,060 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:04:58,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:04:58,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T01:04:58,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-05T01:04:58,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-05T01:04:58,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-05T01:04:58,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:04:58,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-05T01:04:58,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T01:05:03,806 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:05:06,699 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T01:05:36,699 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;2113c16e5528:33369
233 active threads
Thread 1 (main):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444)
    java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203)
    app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167)
    app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128)
    app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39)
    app//org.junit.rules.RunRules.evaluate(RunRules.java:20)
    app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    app//org.junit.runners.ParentRunner.run(ParentRunner.java:413)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155)
    app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385)
    app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162)
    app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507)
    app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
Thread 2 (Reference Handler):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method)
    java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253)
    java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)
Thread 3 (Finalizer):
  State: WAITING
  Blocked count: 33
  Waited count: 19
  Waiting on java.lang.ref.ReferenceQueue$Lock@444355a5
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172)
Thread 4 (Signal Dispatcher):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 12 (Common-Cleaner):
  State: TIMED_WAITING
  Blocked count: 21
  Waited count: 24
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
    java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162)
Thread 13 (Notification Thread):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 14 (pool-1-thread-1):
  State: WAITING
  Blocked count: 1
  Waited count: 25
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17ff1bac
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 15 (pool-1-thread-2):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 23
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 16 (surefire-forkedjvm-stream-flusher):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 4674
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 18 (surefire-forkedjvm-command-thread):
  State: WAITING
  Blocked count: 0
  Waited count: 47
  Waiting on java.util.concurrent.CountDownLatch$Sync@57a58481
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230)
    java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178)
    app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169)
    app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116)
    app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77)
    app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60)
    app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 22 (Time-limited test):
  State: RUNNABLE
  Blocked count: 12003
  Waited count: 12627
  Stack:
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154)
    app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181)
    app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186)
    app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113)
    app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396)
    app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886)
    app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038)
    app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568)
    app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner):
  State: WAITING
  Blocked count: 15
  Waited count: 16
  Waiting on java.lang.ref.ReferenceQueue$Lock@16b6f261
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 25 (SSL Certificates Store Monitor):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.TaskQueue@79fba960
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@14f04a62):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 930
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 93
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Thread 36 (pool-6-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 37 (qtp342963407-37):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 38 (qtp342963407-38):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 39 (qtp342963407-39):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 40 (qtp342963407-40):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp342963407-41-acceptor-0@2892a159-ServerConnector@33b00b71{HTTP/1.1, (http/1.1)}{localhost:38475}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp342963407-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp342963407-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp342963407-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-b071e18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 38 Waited count: 3289 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18a71fac Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37149): State: TIMED_WAITING Blocked count: 1 Waited 
count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@2a178eda): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 155 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@14179e6b): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 159 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 45777 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1675 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29516e37 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37149): State: TIMED_WAITING Blocked count: 81 Waited count: 2454 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37149): State: TIMED_WAITING Blocked count: 73 Waited count: 2461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37149): State: TIMED_WAITING Blocked count: 95 Waited count: 2464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37149): State: TIMED_WAITING Blocked count: 87 Waited count: 2473 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37149): State: TIMED_WAITING Blocked count: 64 Waited count: 2459 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5ed1041e): State: TIMED_WAITING Blocked count: 0 Waited count: 232 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2129b88): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b36a401): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@68b8bdaf): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(568031931)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp902969986-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp902969986-88-acceptor-0@56cb3c38-ServerConnector@5703ed84{HTTP/1.1, (http/1.1)}{localhost:36959}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp902969986-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp902969986-90): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-becdb63-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7efce26a): State: TIMED_WAITING Blocked count: 0 Waited count: 927 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 41813): State: TIMED_WAITING Blocked count: 1 Waited count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 303 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35eb4fc2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 1414 Waited count: 1543 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@47bb227d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 474 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 470 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 481 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 475 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 467 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp512551189-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp512551189-120-acceptor-0@7fc11ab0-ServerConnector@7e7a95e6{HTTP/1.1, (http/1.1)}{localhost:37567}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp512551189-122): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp512551189-124): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-26171ccc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (IPC Client (834363022) connection to localhost/127.0.0.1:37149 from jenkins): State: TIMED_WAITING Blocked count: 1413 Waited count: 1414 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 0 Waited count: 2087 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4ade5188): State: TIMED_WAITING Blocked count: 0 Waited count: 926 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 39179): State: TIMED_WAITING Blocked count: 1 Waited count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 283 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64a025d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 1407 Waited count: 1537 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3c065f15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 477 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 469 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 477 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 466 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp243104512-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp243104512-154-acceptor-0@61b11724-ServerConnector@295c990e{HTTP/1.1, (http/1.1)}{localhost:37213}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp243104512-155): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp243104512-156): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-43a15b30-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6412af41): State: TIMED_WAITING Blocked count: 0 Waited count: 926 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 165 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 166 (IPC Server idle connection scanner for port 42087): State: TIMED_WAITING Blocked count: 1 Waited count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 168 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data1)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 170 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data3)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data2)): State: TIMED_WAITING Blocked count: 9 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 174 (Command processor): State: WAITING Blocked count: 0 Waited count: 286 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@881ea24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 175 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data4)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 176 (BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 1351 Waited count: 1515 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (pool-44-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@686bb815): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 164 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 182 (IPC Server handler 0 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 463 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 183 (IPC Server handler 1 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 463 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 184 (IPC Server handler 2 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (IPC Server handler 3 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 466 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (IPC Server handler 4 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 463 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 191 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data1/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data2/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data4/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data3/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited 
count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (java.util.concurrent.ThreadPoolExecutor$Worker@1b1770e1[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (java.util.concurrent.ThreadPoolExecutor$Worker@254ec2c6[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 214 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data5/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data6/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (java.util.concurrent.ThreadPoolExecutor$Worker@50125bb2[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (FsDatasetAsyncDiskServiceFixer): 
State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 235 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 234 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 236 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:56893): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 233 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 47 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 237 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 232 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 238 (SyncThread:0): State: WAITING Blocked count: 13 Waited count: 411 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@320be19b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 239 (ProcessThread(sid:0 
cport:56893):): State: WAITING Blocked count: 1 Waited count: 512 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@742b21be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 240 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 538 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d2ad5c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 241 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 243 (LeaseRenewer:jenkins@localhost:37149): State: TIMED_WAITING Blocked count: 13 Waited count: 483 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 252 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@74a5184b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 253 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 542 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 40 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (Time-limited test-SendThread(127.0.0.1:56893)): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 256 (Time-limited test-EventThread): State: WAITING Blocked count: 1 Waited count: 64 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@728183ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 257 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (NIOWorkerThread-3): State: WAITING Blocked count: 8 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (zk-event-processor-pool-0): State: WAITING Blocked count: 23 Waited count: 81 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12804908 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-5): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 99 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 
(NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-16): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4ebb6c53 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 275 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369): State: WAITING Blocked count: 140 Waited count: 542 Waiting on java.util.concurrent.Semaphore$NonfairSync@60362755 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369): State: WAITING Blocked count: 105 Waited count: 472 Waiting on java.util.concurrent.Semaphore$NonfairSync@1e544c15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33369): State: WAITING Blocked count: 75 Waited count: 8753 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e8c8ede Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33369): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bc0a2a8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 
(RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bc0a2a8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a9d320a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7615fba8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1c8571ca Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 
(RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@2ce4510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21e03a21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 309 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 331 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 20 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 284 (M:0;2113c16e5528:33369): State: TIMED_WAITING Blocked count: 12 Waited count: 4006 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1094/0x00007f5c14f6f6b8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/2113c16e5528:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/2113c16e5528:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@21fcb79): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4591 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 61 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 50 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 155 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45848 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 57 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 427 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 27 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 450 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@498fc61a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 474 
(regionserver/2113c16e5528:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ca77816 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/2113c16e5528:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6191a803 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 476 (regionserver/2113c16e5528:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@759431d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 503 (LeaseRenewer:jenkins.hfs.0@localhost:37149): State: TIMED_WAITING Blocked count: 15 Waited count: 482 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 506 
(LeaseRenewer:jenkins.hfs.2@localhost:37149): State: TIMED_WAITING Blocked count: 15 Waited count: 480 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 511 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 522 (region-location-0): State: WAITING Blocked count: 9 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45717 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 545 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 554 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 360 Waiting on java.util.concurrent.ForkJoinPool@27a1532e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 580 (region-location-1): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 581 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 979 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 822 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 76 Waited count: 115 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@690d5bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1133 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1195 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1196 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1197 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1198 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1248 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 1249 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1250 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1252 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1605 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@39a9f9ae Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1839 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1840 (region-location-4): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1968 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2016 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2114 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 915 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 8138 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 77 Waiting on java.util.concurrent.ForkJoinPool@27a1532e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 8850 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 161 Waiting on java.util.concurrent.ForkJoinPool@27a1532e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10166 (AsyncFSWAL-1-hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData-prefix:2113c16e5528,33369,1733360287836): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@14bcf596 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10169 (java.util.concurrent.ThreadPoolExecutor$Worker@39d3c6eb[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10172 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-05T01:06:06,699 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T01:06:36,699 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;2113c16e5528:33369 228 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 33 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@444355a5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 25 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 1 Waited count: 28 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17ff1bac Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 1 Waited count: 26 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5274 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 53 Waiting on java.util.concurrent.CountDownLatch$Sync@4816d3bf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12003 Waited count: 12628 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@16b6f261 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@79fba960 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@14f04a62): State: TIMED_WAITING Blocked count: 0 Waited count: 1050 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp342963407-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp342963407-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp342963407-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp342963407-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp342963407-41-acceptor-0@2892a159-ServerConnector@33b00b71{HTTP/1.1, (http/1.1)}{localhost:38475}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp342963407-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp342963407-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp342963407-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-b071e18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 38 Waited count: 3289 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18a71fac Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37149): State: TIMED_WAITING Blocked count: 1 Waited 
count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@2a178eda): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 175 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@14179e6b): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 179 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 51742 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1675 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29516e37 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37149): State: TIMED_WAITING Blocked count: 84 Waited count: 2515 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37149): State: TIMED_WAITING Blocked count: 75 Waited count: 2523 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37149): State: TIMED_WAITING Blocked count: 101 Waited count: 2525 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37149): State: TIMED_WAITING Blocked count: 91 Waited count: 2534 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37149): State: TIMED_WAITING Blocked count: 68 Waited count: 2520 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5ed1041e): State: TIMED_WAITING Blocked count: 0 Waited count: 262 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2129b88): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b36a401): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@68b8bdaf): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(568031931)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp902969986-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp902969986-88-acceptor-0@56cb3c38-ServerConnector@5703ed84{HTTP/1.1, (http/1.1)}{localhost:36959}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp902969986-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp902969986-90): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-becdb63-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7efce26a): State: TIMED_WAITING Blocked count: 0 Waited count: 1047 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 41813): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 323 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35eb4fc2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 1434 Waited count: 1586 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@47bb227d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 534 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 530 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 541 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 540 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 527 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp512551189-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp512551189-120-acceptor-0@7fc11ab0-ServerConnector@7e7a95e6{HTTP/1.1, (http/1.1)}{localhost:37567}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp512551189-122): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp512551189-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-26171ccc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (IPC Client (834363022) connection to localhost/127.0.0.1:37149 from jenkins): State: TIMED_WAITING Blocked count: 1453 Waited count: 1454 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 0 Waited count: 2127 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4ade5188): State: TIMED_WAITING Blocked count: 0 Waited count: 1046 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 39179): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 303 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64a025d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 1427 Waited count: 1594 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3c065f15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 537 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 529 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 537 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 526 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp243104512-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp243104512-154-acceptor-0@61b11724-ServerConnector@295c990e{HTTP/1.1, (http/1.1)}{localhost:37213}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp243104512-155): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp243104512-156): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-43a15b30-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6412af41): State: TIMED_WAITING Blocked count: 0 Waited count: 1046 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 165 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 166 (IPC Server idle connection scanner for port 42087): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 168 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data1)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 170 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data3)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data2)): State: TIMED_WAITING Blocked count: 9 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 174 (Command processor): State: WAITING Blocked count: 0 Waited count: 306 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@881ea24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 175 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data4)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 176 (BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 1371 Waited count: 1555 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (pool-44-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@686bb815): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 164 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 182 (IPC Server handler 0 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 523 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 183 (IPC Server handler 1 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 523 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 184 (IPC Server handler 2 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (IPC Server handler 3 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 526 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (IPC Server handler 4 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 523 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 191 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data1/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data2/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data4/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data3/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited 
count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (java.util.concurrent.ThreadPoolExecutor$Worker@1b1770e1[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (java.util.concurrent.ThreadPoolExecutor$Worker@254ec2c6[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 214 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data5/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data6/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (java.util.concurrent.ThreadPoolExecutor$Worker@50125bb2[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (FsDatasetAsyncDiskServiceFixer): 
State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 235 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 234 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 236 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:56893): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 233 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 53 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 237 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 262 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 238 (SyncThread:0): State: WAITING Blocked count: 13 Waited count: 416 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@320be19b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 239 (ProcessThread(sid:0 
cport:56893):): State: WAITING Blocked count: 1 Waited count: 517 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@742b21be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 240 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 543 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d2ad5c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 241 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 252 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@74a5184b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 253 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 594 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 40 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (Time-limited test-SendThread(127.0.0.1:56893)): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 256 (Time-limited test-EventThread): State: WAITING Blocked count: 1 Waited count: 64 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@728183ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 257 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (NIOWorkerThread-3): State: WAITING Blocked count: 8 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (zk-event-processor-pool-0): State: WAITING Blocked count: 23 Waited count: 81 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12804908 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-5): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-16): State: WAITING Blocked count: 6 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4ebb6c53 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 275 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369): State: WAITING Blocked count: 140 Waited count: 542 Waiting on java.util.concurrent.Semaphore$NonfairSync@60362755 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 276 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369): State: WAITING Blocked count: 105 Waited count: 472 Waiting on java.util.concurrent.Semaphore$NonfairSync@1e544c15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33369): State: WAITING Blocked count: 75 Waited count: 8753 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e8c8ede Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33369): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bc0a2a8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bc0a2a8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a9d320a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7615fba8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1c8571ca Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@2ce4510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21e03a21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 309 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 331 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 20 
Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 284 (M:0;2113c16e5528:33369): State: TIMED_WAITING Blocked count: 12 Waited count: 4006 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1094/0x00007f5c14f6f6b8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/2113c16e5528:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/2113c16e5528:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@21fcb79): State: TIMED_WAITING Blocked count: 0 Waited count: 174 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5191 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 61 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 50 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 173 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f4a463f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51850 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 57 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 427 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 27 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 450 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@498fc61a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 474 (regionserver/2113c16e5528:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ca77816 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/2113c16e5528:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6191a803 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 476 (regionserver/2113c16e5528:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@759431d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 511 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 522 (region-location-0): State: WAITING Blocked count: 9 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51718 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 545 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 554 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 361 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 580 (region-location-1): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 581 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 979 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 828 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 76 Waited count: 115 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@690d5bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1133 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1195 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1196 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1197 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1198 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1248 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1249 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1250 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1252 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1605 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@39a9f9ae Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1839 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1840 (region-location-4): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1968 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2016 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8138 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 77 Waiting on java.util.concurrent.ForkJoinPool@27a1532e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 8850 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 161 Waiting on java.util.concurrent.ForkJoinPool@27a1532e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10166 (AsyncFSWAL-1-hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData-prefix:2113c16e5528,33369,1733360287836): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@14bcf596 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10172 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-05T01:07:06,699 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-05T01:07:36,700 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;2113c16e5528:33369 228 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 33 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@444355a5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 1 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17ff1bac Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 1 Waited count: 29 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5873 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 59 Waiting on java.util.concurrent.CountDownLatch$Sync@3b759f6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12003 Waited count: 12629 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@16b6f261 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@79fba960 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@14f04a62): State: TIMED_WAITING Blocked count: 0 Waited count: 1170 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp342963407-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp342963407-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp342963407-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp342963407-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp342963407-41-acceptor-0@2892a159-ServerConnector@33b00b71{HTTP/1.1, (http/1.1)}{localhost:38475}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp342963407-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp342963407-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp342963407-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-b071e18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 38 Waited count: 3289 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18a71fac Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37149): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@2a178eda): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 195 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@14179e6b): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 199 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) 
java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 57706 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1675 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29516e37 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37149): State: TIMED_WAITING Blocked count: 86 Waited count: 2577 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37149): State: TIMED_WAITING Blocked count: 77 Waited count: 2584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37149): State: TIMED_WAITING Blocked count: 106 Waited count: 2585 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37149): State: TIMED_WAITING Blocked count: 95 Waited count: 2596 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37149): State: TIMED_WAITING Blocked count: 76 Waited count: 2580 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5ed1041e): State: TIMED_WAITING Blocked count: 0 Waited count: 292 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2129b88): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b36a401): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@68b8bdaf): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(568031931)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp902969986-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp902969986-88-acceptor-0@56cb3c38-ServerConnector@5703ed84{HTTP/1.1, (http/1.1)}{localhost:36959}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp902969986-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp902969986-90): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-becdb63-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7efce26a): State: TIMED_WAITING Blocked count: 0 Waited count: 1167 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 41813): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 343 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35eb4fc2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 1456 Waited count: 1638 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@47bb227d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 594 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 590 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 601 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 609 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 595 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp512551189-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp512551189-120-acceptor-0@7fc11ab0-ServerConnector@7e7a95e6{HTTP/1.1, (http/1.1)}{localhost:37567}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp512551189-122): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp512551189-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-26171ccc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (IPC Client (834363022) connection to localhost/127.0.0.1:37149 from jenkins): State: TIMED_WAITING Blocked count: 1493 Waited count: 1494 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 0 Waited count: 2167 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4ade5188): State: TIMED_WAITING Blocked count: 0 Waited count: 1166 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 39179): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 323 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64a025d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 1448 Waited count: 1642 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3c065f15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 597 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 589 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 597 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 586 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp243104512-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp243104512-154-acceptor-0@61b11724-ServerConnector@295c990e{HTTP/1.1, (http/1.1)}{localhost:37213}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp243104512-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp243104512-156): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-43a15b30-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6412af41): State: TIMED_WAITING Blocked count: 0 Waited count: 1166 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 165 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 166 (IPC Server idle connection scanner for port 42087): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 168 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data1)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 170 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data3)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data2)): State: TIMED_WAITING Blocked count: 9 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 174 (Command processor): State: WAITING Blocked count: 0 Waited count: 326 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@881ea24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 175 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data4)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 176 (BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 1391 Waited count: 1595 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (pool-44-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@686bb815): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 164 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 182 (IPC Server handler 0 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 583 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 183 (IPC Server handler 1 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 583 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 184 (IPC Server handler 2 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (IPC Server handler 3 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 586 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (IPC Server handler 4 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 583 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 191 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data1/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data2/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data4/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data3/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (java.util.concurrent.ThreadPoolExecutor$Worker@1b1770e1[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (java.util.concurrent.ThreadPoolExecutor$Worker@254ec2c6[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 214 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data5/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data6/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (java.util.concurrent.ThreadPoolExecutor$Worker@50125bb2[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 235 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 234 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 236 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:56893): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 233 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 59 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 237 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 292 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 238 (SyncThread:0): State: WAITING Blocked count: 13 Waited count: 420 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@320be19b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 239 (ProcessThread(sid:0 cport:56893):): State: WAITING Blocked count: 1 Waited count: 521 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@742b21be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 240 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 547 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d2ad5c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 241 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 252 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@74a5184b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 253 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 646 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 40 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (Time-limited test-SendThread(127.0.0.1:56893)): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 256 (Time-limited test-EventThread): State: WAITING Blocked count: 1 Waited count: 64 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@728183ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 257 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (NIOWorkerThread-3): State: WAITING Blocked count: 8 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (zk-event-processor-pool-0): State: WAITING Blocked count: 23 Waited count: 81 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12804908 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-5): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 100 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 
(NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-16): State: WAITING Blocked count: 6 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4ebb6c53 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 275 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369): State: WAITING Blocked count: 140 Waited count: 542 Waiting on java.util.concurrent.Semaphore$NonfairSync@60362755 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369): State: WAITING Blocked count: 105 Waited count: 472 Waiting on java.util.concurrent.Semaphore$NonfairSync@1e544c15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33369): State: WAITING Blocked count: 75 Waited count: 8753 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e8c8ede Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33369): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bc0a2a8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 
(RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bc0a2a8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a9d320a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7615fba8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1c8571ca Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 
(RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@2ce4510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21e03a21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 309 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 331 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 20 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 284 (M:0;2113c16e5528:33369): State: TIMED_WAITING Blocked count: 12 Waited count: 4006 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1094/0x00007f5c14f6f6b8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/2113c16e5528:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/2113c16e5528:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@21fcb79): State: TIMED_WAITING Blocked count: 0 Waited count: 194 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5790 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 61 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 50 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 173 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f4a463f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57851 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 57 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 427 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 27 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 450 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@498fc61a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 474 (regionserver/2113c16e5528:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ca77816 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/2113c16e5528:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6191a803 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 476 (regionserver/2113c16e5528:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@759431d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 511 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 522 (region-location-0): State: WAITING Blocked count: 9 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57720 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 545 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 580 (region-location-1): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 581 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 979 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 834 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 76 Waited count: 115 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@690d5bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1133 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1195 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1196 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1197 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1198 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1248 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1249 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1250 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1252 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1605 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@39a9f9ae Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1839 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1840 (region-location-4): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1968 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2016 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8138 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 77 Waiting on java.util.concurrent.ForkJoinPool@27a1532e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 8850 (ForkJoinPool.commonPool-worker-6): State: TIMED_WAITING Blocked count: 0 Waited count: 162 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10166 (AsyncFSWAL-1-hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData-prefix:2113c16e5528,33369,1733360287836): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@14bcf596 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10172 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 10173 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-05T01:08:06,700 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details. 2024-12-05T01:08:09,012 DEBUG [master/2113c16e5528:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=29, reuseRatio=74.36% 2024-12-05T01:08:09,012 DEBUG [master/2113c16e5528:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-05T01:08:16,616 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T01:08:36,700 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;2113c16e5528:33369 227 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 33 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@444355a5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 1 Waited count: 34 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17ff1bac Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 1 Waited count: 32 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6473 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 65 Waiting on java.util.concurrent.CountDownLatch$Sync@53da3930 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12003 Waited count: 12630 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@16b6f261 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@79fba960 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@14f04a62): State: TIMED_WAITING Blocked count: 0 Waited count: 1290 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp342963407-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp342963407-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp342963407-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp342963407-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp342963407-41-acceptor-0@2892a159-ServerConnector@33b00b71{HTTP/1.1, (http/1.1)}{localhost:38475}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp342963407-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp342963407-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp342963407-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-b071e18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 38 Waited count: 3289 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18a71fac Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37149): State: TIMED_WAITING Blocked count: 1 Waited 
count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@2a178eda): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 215 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@14179e6b): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 219 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 63671 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1675 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29516e37 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37149): State: TIMED_WAITING Blocked count: 93 Waited count: 2640 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37149): State: TIMED_WAITING Blocked count: 81 Waited count: 2647 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37149): State: TIMED_WAITING Blocked count: 111 Waited count: 2647 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37149): State: TIMED_WAITING Blocked count: 97 Waited count: 2658 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37149): State: TIMED_WAITING Blocked count: 79 Waited count: 2643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5ed1041e): State: TIMED_WAITING Blocked count: 0 Waited count: 322 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2129b88): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b36a401): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@68b8bdaf): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(568031931)): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp902969986-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp902969986-88-acceptor-0@56cb3c38-ServerConnector@5703ed84{HTTP/1.1, (http/1.1)}{localhost:36959}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp902969986-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp902969986-90): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-becdb63-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7efce26a): State: TIMED_WAITING Blocked count: 0 Waited count: 1287 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 41813): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 363 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35eb4fc2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 1481 Waited count: 1690 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@47bb227d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 654 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 661 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 678 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 665 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp512551189-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp512551189-120-acceptor-0@7fc11ab0-ServerConnector@7e7a95e6{HTTP/1.1, (http/1.1)}{localhost:37567}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp512551189-122): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp512551189-124): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-26171ccc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (IPC Client (834363022) connection to localhost/127.0.0.1:37149 from jenkins): State: TIMED_WAITING Blocked count: 1533 Waited count: 1534 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 0 Waited count: 2207 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4ade5188): State: TIMED_WAITING Blocked count: 0 Waited count: 1286 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 39179): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 343 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64a025d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 1469 Waited count: 1690 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3c065f15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 644 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 657 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 649 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 657 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 646 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp243104512-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp243104512-154-acceptor-0@61b11724-ServerConnector@295c990e{HTTP/1.1, (http/1.1)}{localhost:37213}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp243104512-155): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp243104512-156): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-43a15b30-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6412af41): State: TIMED_WAITING Blocked count: 0 Waited count: 1286 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 165 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 166 (IPC Server idle connection scanner for port 42087): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 168 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data1)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 170 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data3)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data2)): State: TIMED_WAITING Blocked count: 9 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 174 (Command processor): State: WAITING Blocked count: 0 Waited count: 346 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@881ea24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 175 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data4)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 176 (BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 1411 Waited count: 1635 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (pool-44-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@686bb815): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 164 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 182 (IPC Server handler 0 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 183 (IPC Server handler 1 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 184 (IPC Server handler 2 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 644 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (IPC Server handler 3 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (IPC Server handler 4 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 644 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 191 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data1/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data2/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data4/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data3/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@437d0559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@9071baa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (java.util.concurrent.ThreadPoolExecutor$Worker@1b1770e1[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (java.util.concurrent.ThreadPoolExecutor$Worker@254ec2c6[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 214 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data5/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data6/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6d9c829c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (java.util.concurrent.ThreadPoolExecutor$Worker@50125bb2[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 235 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 234 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 236 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:56893): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 233 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 237 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 322 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 238 (SyncThread:0): State: WAITING Blocked count: 13 Waited count: 425 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@320be19b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 239 (ProcessThread(sid:0 cport:56893):): State: WAITING Blocked count: 1 Waited count: 526 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@742b21be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 240 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 552 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d2ad5c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 241 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 252 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@74a5184b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 253 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 701 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 40 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (Time-limited test-SendThread(127.0.0.1:56893)): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 256 (Time-limited test-EventThread): State: WAITING Blocked count: 1 Waited count: 64 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@728183ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 257 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (NIOWorkerThread-3): State: WAITING Blocked count: 8 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (zk-event-processor-pool-0): State: WAITING Blocked count: 23 Waited count: 81 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12804908 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-5): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-16): State: WAITING Blocked count: 6 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4ebb6c53 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 275 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369): State: WAITING Blocked count: 140 Waited count: 542 Waiting on java.util.concurrent.Semaphore$NonfairSync@60362755 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369): State: WAITING Blocked count: 105 Waited count: 472 Waiting on java.util.concurrent.Semaphore$NonfairSync@1e544c15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33369): State: WAITING Blocked count: 75 Waited count: 8753 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e8c8ede Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33369): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bc0a2a8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bc0a2a8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a9d320a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7615fba8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33369): 
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1c8571ca Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@2ce4510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21e03a21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 309 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 331 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 20 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 284 (M:0;2113c16e5528:33369): State: TIMED_WAITING Blocked count: 12 Waited count: 4006 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1094/0x00007f5c14f6f6b8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/2113c16e5528:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/2113c16e5528:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@21fcb79): State: TIMED_WAITING Blocked count: 0 Waited count: 214 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited 
count: 6390 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 61 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 50 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 173 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f4a463f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 63853 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 57 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 427 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 27 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 450 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 12 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@498fc61a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 474 (regionserver/2113c16e5528:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ca77816 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/2113c16e5528:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6191a803 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 476 (regionserver/2113c16e5528:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@759431d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 511 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 522 (region-location-0): State: WAITING Blocked count: 9 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 63721 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 545 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 580 (region-location-1): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 581 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 979 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 840 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 76 Waited count: 115 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@690d5bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1133 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE 
Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1195 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1196 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1197 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1198 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1248 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1249 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1250 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1252 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1605 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@39a9f9ae Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1839 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1840 (region-location-4): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1968 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2016 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8138 (ForkJoinPool.commonPool-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10166 (AsyncFSWAL-1-hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData-prefix:2113c16e5528,33369,1733360287836): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@14bcf596 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10173 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10178 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-05T01:09:06,700 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-05T01:09:36,700 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-05T01:09:48,805 DEBUG [M:0;2113c16e5528:33369 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733360688790Disabling compacts and flushes for region at 1733360688790Disabling writes for close at 1733360688803 (+13 ms)Obtaining lock to block concurrent updates at 1733360688803Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733360688803Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=1025381, getHeapSize=1231096, getOffHeapSize=0, getCellsCount=2715 at 1733360688804 (+1 ms)Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1733360988805 (+300001 ms)
2024-12-05T01:09:48,805 WARN [M:0;2113c16e5528:33369 {}] region.MasterRegion(134): Failed to close region
org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4652, WAL system stuck?
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?]
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?]
  at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?]
  at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?]
  at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?]
  at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4652, WAL system stuck?
  at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?]
  ... 19 more
2024-12-05T01:09:48,806 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null
  at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-05T01:09:48,808 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-12-05T01:09:48,808 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-12-05T01:09:48,808 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData/WALs/2113c16e5528,33369,1733360287836/2113c16e5528%2C33369%2C1733360287836.1733360289414
2024-12-05T01:09:48,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData/WALs/2113c16e5528,33369,1733360287836/2113c16e5528%2C33369%2C1733360287836.1733360289414 after 1ms
java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
  at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-05T01:09:48,811 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.InterruptedIOException: Operation cancelled
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
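Note on the lease-recovery attempts above and the retry that follows: RecoverLeaseFSUtils reaches DistributedFileSystem.recoverLease() through reflection, and each attempt here fails immediately with "java.io.IOException: Filesystem closed" because the test's DFSClient has already been shut down. Below is a minimal sketch, not the HBase code path itself, of the same call made through the public HDFS client API; the NameNode address is taken from the log, the WAL path is shortened to a placeholder, and a freshly opened FileSystem is assumed rather than the closed instance the writer was holding.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class RecoverWalLeaseSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address as reported in the log; the WAL path below is a placeholder,
        // not the full path from the log.
        URI nameNode = URI.create("hdfs://localhost:37149");
        Path wal = new Path("/user/jenkins/test-data/MasterData/WALs/example-wal");
        try (FileSystem fs = FileSystem.get(nameNode, conf)) {
          if (fs instanceof DistributedFileSystem) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            // recoverLease() returns true once the NameNode has closed the file and
            // released the previous writer's lease; callers typically poll until then.
            boolean recovered = dfs.recoverLease(wal);
            System.out.println("lease recovered: " + recovered);
          }
        }
      }
    }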
2024-12-05T01:09:48,811 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData/WALs/2113c16e5528,33369,1733360287836/2113c16e5528%2C33369%2C1733360287836.1733360289414
2024-12-05T01:09:48,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData/WALs/2113c16e5528,33369,1733360287836/2113c16e5528%2C33369%2C1733360287836.1733360289414 after 0ms
java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
  at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
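Note on the WALSyncTimeoutIOException above ("Failed to get sync result after 300000 ms for txid=4652, WAL system stuck?"): the master region's flush-on-close waited the full WAL sync timeout before giving up, which is what put the region back online and left the master hung in close. The 300000 ms matches what is believed to be the default of hbase.regionserver.wal.sync.timeout (5 minutes); the sketch below shows how a test configuration might raise it. Treat the property name as an assumption to verify against the HBase version in use, not something this log confirms.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalSyncTimeoutSketch {
      public static void main(String[] args) {
        // Sketch only: "hbase.regionserver.wal.sync.timeout" is assumed to be the
        // property behind the 300000 ms wait reported by AbstractFSWAL.blockOnSync.
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.wal.sync.timeout", 600_000L); // 10 min instead of the assumed 5 min default
        System.out.println(conf.getLong("hbase.regionserver.wal.sync.timeout", 300_000L));
      }
    }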
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;2113c16e5528:33369 228 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 33 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@444355a5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 28 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 1 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17ff1bac Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7073 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 71 Waiting on java.util.concurrent.CountDownLatch$Sync@3ab6f7fb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12003 Waited count: 12631 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@16b6f261 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@79fba960 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@14f04a62): State: TIMED_WAITING Blocked count: 0 Waited count: 1410 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp342963407-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp342963407-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp342963407-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp342963407-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp342963407-41-acceptor-0@2892a159-ServerConnector@33b00b71{HTTP/1.1, (http/1.1)}{localhost:38475}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp342963407-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp342963407-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp342963407-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-b071e18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 38 Waited count: 3289 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18a71fac Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37149): State: TIMED_WAITING Blocked count: 1 Waited 
count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@2a178eda): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 235 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@14179e6b): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 239 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 69636 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1675 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29516e37 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37149): State: TIMED_WAITING Blocked count: 97 Waited count: 2701 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37149): State: TIMED_WAITING Blocked count: 83 Waited count: 2708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37149): State: TIMED_WAITING Blocked count: 113 Waited count: 2708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37149): State: TIMED_WAITING Blocked count: 98 Waited count: 2719 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37149): State: TIMED_WAITING Blocked count: 83 Waited count: 2704 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5ed1041e): State: TIMED_WAITING Blocked count: 0 Waited count: 352 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2129b88): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b36a401): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@68b8bdaf): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(568031931)): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp902969986-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp902969986-88-acceptor-0@56cb3c38-ServerConnector@5703ed84{HTTP/1.1, (http/1.1)}{localhost:36959}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp902969986-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp902969986-90): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-becdb63-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7efce26a): State: TIMED_WAITING Blocked count: 0 Waited count: 1407 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 41813): State: TIMED_WAITING Blocked count: 1 Waited count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 383 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35eb4fc2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 1503 Waited count: 1741 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@47bb227d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 714 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 710 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 721 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 746 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 41813): State: TIMED_WAITING Blocked count: 0 Waited count: 727 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp512551189-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp512551189-120-acceptor-0@7fc11ab0-ServerConnector@7e7a95e6{HTTP/1.1, (http/1.1)}{localhost:37567}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp512551189-122): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp512551189-124): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-26171ccc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (IPC Client (834363022) connection to localhost/127.0.0.1:37149 from jenkins): State: TIMED_WAITING Blocked count: 1580 Waited count: 1581 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 0 Waited count: 2255 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4ade5188): State: TIMED_WAITING Blocked count: 0 Waited count: 1406 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 39179): State: TIMED_WAITING Blocked count: 1 Waited count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 363 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64a025d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 1489 Waited count: 1731 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3c065f15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 704 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 717 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 709 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 717 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 39179): State: TIMED_WAITING Blocked count: 0 Waited count: 706 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp243104512-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f5c1442b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp243104512-154-acceptor-0@61b11724-ServerConnector@295c990e{HTTP/1.1, (http/1.1)}{localhost:37213}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp243104512-155): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp243104512-156): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-43a15b30-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6412af41): State: TIMED_WAITING Blocked count: 0 Waited count: 1406 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 165 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 166 (IPC Server idle connection scanner for port 42087): State: TIMED_WAITING Blocked count: 1 Waited count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 168 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data1)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 170 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data3)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data2)): State: TIMED_WAITING Blocked count: 9 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 174 (Command processor): State: WAITING Blocked count: 0 Waited count: 366 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@881ea24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 175 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data4)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 176 (BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149): State: TIMED_WAITING Blocked count: 1431 Waited count: 1675 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (pool-44-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@686bb815): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 164 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 182 (IPC Server handler 0 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 703 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 183 (IPC Server handler 1 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 703 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 184 (IPC Server handler 2 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 704 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (IPC Server handler 3 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 715 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (IPC Server handler 4 on default port 42087): State: TIMED_WAITING Blocked count: 0 Waited count: 708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 191 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data1/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data2/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data4/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data3/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@437d0559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@9071baa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (java.util.concurrent.ThreadPoolExecutor$Worker@1b1770e1[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (java.util.concurrent.ThreadPoolExecutor$Worker@254ec2c6[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 214 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data5/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data6/current/BP-1741140594-172.17.0.2-1733360283702): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6d9c829c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (java.util.concurrent.ThreadPoolExecutor$Worker@50125bb2[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 235 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 234 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 236 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:56893): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 233 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 71 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 237 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 352 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 238 (SyncThread:0): State: WAITING Blocked count: 13 Waited count: 429 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@320be19b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 239 (ProcessThread(sid:0 cport:56893):): State: WAITING Blocked count: 1 Waited count: 530 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@742b21be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 240 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 556 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d2ad5c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 241 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 252 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@74a5184b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 253 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 753 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 40 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (Time-limited test-SendThread(127.0.0.1:56893)): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 256 (Time-limited test-EventThread): State: WAITING Blocked count: 1 Waited count: 64 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@728183ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 257 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 103 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (NIOWorkerThread-3): State: WAITING Blocked count: 8 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (zk-event-processor-pool-0): State: WAITING Blocked count: 23 Waited count: 81 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12804908 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-5): State: WAITING Blocked count: 5 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 103 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-16): State: WAITING Blocked count: 6 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@194f35c0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4ebb6c53 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 275 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33369): State: WAITING Blocked count: 140 Waited count: 542 Waiting on java.util.concurrent.Semaphore$NonfairSync@60362755 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33369): State: WAITING Blocked count: 105 Waited count: 472 Waiting on java.util.concurrent.Semaphore$NonfairSync@1e544c15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33369): State: WAITING Blocked count: 75 Waited count: 8753 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e8c8ede Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33369): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bc0a2a8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bc0a2a8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a9d320a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7615fba8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33369): 
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1c8571ca Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33369): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@2ce4510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21e03a21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 309 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 331 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 20 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 284 (M:0;2113c16e5528:33369): State: TIMED_WAITING Blocked count: 12 Waited count: 4007 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1421/0x00007f5c1522a9c8.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) 
app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 70 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/2113c16e5528:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/2113c16e5528:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@21fcb79): State: TIMED_WAITING Blocked count: 0 Waited count: 234 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6990 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 61 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 50 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 173 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f4a463f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 69854 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 57 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 427 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 27 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 450 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@498fc61a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 474 (regionserver/2113c16e5528:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ca77816 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/2113c16e5528:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6191a803 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 476 (regionserver/2113c16e5528:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@759431d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 511 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 522 (region-location-0): State: WAITING Blocked count: 9 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 69722 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 545 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 580 (region-location-1): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 581 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 979 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 846 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 76 Waited count: 115 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@690d5bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1133 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1195 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1196 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1197 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1198 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1248 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1249 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1250 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1252 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1605 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@39a9f9ae Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1839 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1840 (region-location-4): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3051eafa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1968 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2016 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: 
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 10166 (AsyncFSWAL-1-hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData-prefix:2113c16e5528,33369,1733360287836):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@14bcf596
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 10173 (process reaper):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 10178 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 10181 (WAL-Shutdown-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174)
    java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 10182 (Close-WAL-Writer-0):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1406/0x00007f5c15222e00.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-05T01:09:52,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData/WALs/2113c16e5528,33369,1733360287836/2113c16e5528%2C33369%2C1733360287836.1733360289414 after 4000ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-05T01:09:53,807 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds"
2024-12-05T01:09:53,807 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-05T01:09:53,807 INFO [M:0;2113c16e5528:33369 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-05T01:09:53,807 INFO [M:0;2113c16e5528:33369 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33369
2024-12-05T01:09:53,808 INFO [M:0;2113c16e5528:33369 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-05T01:09:53,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37149/user/jenkins/test-data/2696f8d3-39ee-dca0-2ccc-376d271e7e11/MasterData/WALs/2113c16e5528,33369,1733360287836/2113c16e5528%2C33369%2C1733360287836.1733360289414
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 12 more
2024-12-05T01:09:53,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T01:09:53,910 INFO [M:0;2113c16e5528:33369 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-05T01:09:53,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33369-0x1018020e6da0000, quorum=127.0.0.1:56893, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T01:09:53,913 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77f27ab{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T01:09:53,913 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@295c990e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T01:09:53,913 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T01:09:53,913 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6632e866{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T01:09:53,914 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26bad6ee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir/,STOPPED}
2024-12-05T01:09:53,915 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
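The two RecoverLeaseFSUtils warnings above fail because the DFSClient has already been closed while Close-WAL-Writer-0 is still trying to recover the lease on the master's WAL file; the reflective calls in the stack traces bottom out in DistributedFileSystem.recoverLease and DistributedFileSystem.isFileClosed. A minimal sketch of that kind of recover-then-poll loop against a plain HDFS client is shown below; the class name, timeout, and sleep interval are illustrative assumptions, and this is not HBase's actual RecoverLeaseFSUtils implementation.

    // Conceptual sketch only (not HBase's RecoverLeaseFSUtils): recover the HDFS lease
    // on a file and poll until the NameNode reports it closed, or a deadline passes.
    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      static boolean recoverLease(FileSystem fs, Path path, long timeoutMs)
          throws IOException, InterruptedException {
        if (!(fs instanceof DistributedFileSystem)) {
          return true; // only DistributedFileSystem has leases to recover
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          // recoverLease returns true once the previous writer's lease is released
          if (dfs.recoverLease(path)) {
            return true;
          }
          // secondary check: the file may already be marked closed on the NameNode
          if (dfs.isFileClosed(path)) {
            return true;
          }
          Thread.sleep(1000L); // arbitrary back-off between attempts (assumption)
        }
        return false; // caller decides how to handle a timeout
      }
    }

If the underlying DFSClient has been shut down, both recoverLease and isFileClosed throw java.io.IOException: Filesystem closed, which is exactly the failure the log records here.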
2024-12-05T01:09:53,915 WARN [BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-05T01:09:53,915 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T01:09:53,915 WARN [BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1741140594-172.17.0.2-1733360283702 (Datanode Uuid 574b84e0-8ed8-4a2c-b3a3-bd6cf1f65bbb) service to localhost/127.0.0.1:37149
2024-12-05T01:09:53,916 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data5/current/BP-1741140594-172.17.0.2-1733360283702 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T01:09:53,917 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data6/current/BP-1741140594-172.17.0.2-1733360283702 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T01:09:53,917 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T01:09:53,919 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76c02ba1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T01:09:53,919 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7e7a95e6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T01:09:53,919 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T01:09:53,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c3f2c62{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T01:09:53,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@621090f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir/,STOPPED}
2024-12-05T01:09:53,921 WARN [BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-05T01:09:53,921 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-05T01:09:53,921 WARN [BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1741140594-172.17.0.2-1733360283702 (Datanode Uuid 718b468a-c913-4cdb-9ac0-c9df2b7f06ef) service to localhost/127.0.0.1:37149
2024-12-05T01:09:53,921 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T01:09:53,922 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data3/current/BP-1741140594-172.17.0.2-1733360283702 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T01:09:53,922 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data4/current/BP-1741140594-172.17.0.2-1733360283702 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T01:09:53,922 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T01:09:53,923 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ae93e7a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T01:09:53,924 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5703ed84{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T01:09:53,924 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T01:09:53,924 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bf4a906{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T01:09:53,924 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c897929{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir/,STOPPED}
2024-12-05T01:09:53,925 WARN [BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-05T01:09:53,925 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-05T01:09:53,925 WARN [BP-1741140594-172.17.0.2-1733360283702 heartbeating to localhost/127.0.0.1:37149 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1741140594-172.17.0.2-1733360283702 (Datanode Uuid 7abf6def-a180-4e0f-a930-192612f670ca) service to localhost/127.0.0.1:37149
2024-12-05T01:09:53,925 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T01:09:53,926 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data1/current/BP-1741140594-172.17.0.2-1733360283702 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T01:09:53,926 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/cluster_7fa23ac9-c0a3-24f4-baa2-112ff49101d1/data/data2/current/BP-1741140594-172.17.0.2-1733360283702 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T01:09:53,926 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T01:09:53,932 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@287bbdb7{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-05T01:09:53,933 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@33b00b71{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T01:09:53,933 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T01:09:53,933 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15dced0e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T01:09:53,933 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c3e3f70{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/3dfdac64-cf55-decd-d8ce-80428e7ad7d4/hadoop.log.dir/,STOPPED}
2024-12-05T01:09:53,945 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-05T01:09:54,156 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
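The earlier ERROR from wal.AbstractFSWAL ("We have waited 5 seconds but the close of async writer doesn't complete") names its own knob: hbase.wal.async.wait.on.shutdown.seconds. A hedged sketch of raising it programmatically follows; the property name comes straight from that log line, while the value 30 is an arbitrary example, and in a real deployment the setting would normally live in hbase-site.xml rather than code.

    // Illustrative only: bump the async-WAL shutdown wait named in the ERROR above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "We have waited 5 seconds" suggests the effective value during this run was 5;
        // 30 here is just an example, not a recommended setting.
        conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 30);
        System.out.println(conf.getInt("hbase.wal.async.wait.on.shutdown.seconds", 5));
      }
    }

In this particular run the longer wait would only mask the symptom: the close stalls because the HDFS client was already shut down underneath the WAL, as the "Filesystem closed" exceptions above show.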